grsecurity-2.2.2-3.0.7-201110180733.patch
1 diff -urNp linux-3.0.7/arch/alpha/include/asm/elf.h linux-3.0.7/arch/alpha/include/asm/elf.h
2 --- linux-3.0.7/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.7/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.7/arch/alpha/include/asm/pgtable.h linux-3.0.7/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.7/arch/alpha/kernel/module.c linux-3.0.7/arch/alpha/kernel/module.c
40 --- linux-3.0.7/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.7/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.7/arch/alpha/kernel/osf_sys.c linux-3.0.7/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.7/arch/alpha/mm/fault.c linux-3.0.7/arch/alpha/mm/fault.c
86 --- linux-3.0.7/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.7/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.7/arch/arm/include/asm/elf.h linux-3.0.7/arch/arm/include/asm/elf.h
245 --- linux-3.0.7/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.7/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.7/arch/arm/include/asm/kmap_types.h linux-3.0.7/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.7/arch/arm/include/asm/uaccess.h linux-3.0.7/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.7/arch/arm/kernel/armksyms.c linux-3.0.7/arch/arm/kernel/armksyms.c
344 --- linux-3.0.7/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.7/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.7/arch/arm/kernel/process.c linux-3.0.7/arch/arm/kernel/process.c
358 --- linux-3.0.7/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.7/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.7/arch/arm/kernel/traps.c linux-3.0.7/arch/arm/kernel/traps.c
382 --- linux-3.0.7/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.7/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.7/arch/arm/lib/copy_from_user.S linux-3.0.7/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.7/arch/arm/lib/copy_to_user.S linux-3.0.7/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.7/arch/arm/lib/uaccess.S linux-3.0.7/arch/arm/lib/uaccess.S
456 --- linux-3.0.7/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.7/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.7/arch/arm/mm/fault.c linux-3.0.7/arch/arm/mm/fault.c
536 --- linux-3.0.7/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.7/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.7/arch/arm/mm/mmap.c linux-3.0.7/arch/arm/mm/mmap.c
587 --- linux-3.0.7/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.7/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.7/arch/avr32/include/asm/elf.h linux-3.0.7/arch/avr32/include/asm/elf.h
639 --- linux-3.0.7/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.7/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.7/arch/avr32/include/asm/kmap_types.h linux-3.0.7/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.7/arch/avr32/mm/fault.c linux-3.0.7/arch/avr32/mm/fault.c
671 --- linux-3.0.7/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.7/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.7/arch/frv/include/asm/kmap_types.h linux-3.0.7/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.7/arch/frv/mm/elf-fdpic.c linux-3.0.7/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.7/arch/ia64/include/asm/elf.h linux-3.0.7/arch/ia64/include/asm/elf.h
757 --- linux-3.0.7/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.7/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.7/arch/ia64/include/asm/pgtable.h linux-3.0.7/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.7/arch/ia64/include/asm/spinlock.h linux-3.0.7/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.7/arch/ia64/include/asm/uaccess.h linux-3.0.7/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.7/arch/ia64/kernel/module.c linux-3.0.7/arch/ia64/kernel/module.c
837 --- linux-3.0.7/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.7/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.7/arch/ia64/kernel/sys_ia64.c linux-3.0.7/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.7/arch/ia64/mm/fault.c linux-3.0.7/arch/ia64/mm/fault.c
975 --- linux-3.0.7/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.7/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.7/arch/ia64/mm/hugetlbpage.c linux-3.0.7/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.7/arch/ia64/mm/init.c linux-3.0.7/arch/ia64/mm/init.c
1039 --- linux-3.0.7/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.7/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.7/arch/m32r/lib/usercopy.c linux-3.0.7/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.7/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.7/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.7/arch/mips/include/asm/elf.h linux-3.0.7/arch/mips/include/asm/elf.h
1085 --- linux-3.0.7/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.7/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.7/arch/mips/include/asm/page.h linux-3.0.7/arch/mips/include/asm/page.h
1109 --- linux-3.0.7/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.7/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.7/arch/mips/include/asm/system.h linux-3.0.7/arch/mips/include/asm/system.h
1121 --- linux-3.0.7/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.7/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.7/arch/mips/kernel/process.c linux-3.0.7/arch/mips/kernel/process.c
1166 --- linux-3.0.7/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.7/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.7/arch/mips/mm/fault.c linux-3.0.7/arch/mips/mm/fault.c
1185 --- linux-3.0.7/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.7/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.7/arch/mips/mm/mmap.c linux-3.0.7/arch/mips/mm/mmap.c
1212 --- linux-3.0.7/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.7/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.7/arch/parisc/include/asm/elf.h linux-3.0.7/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.7/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.7/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.7/arch/parisc/include/asm/pgtable.h linux-3.0.7/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.7/arch/parisc/kernel/module.c linux-3.0.7/arch/parisc/kernel/module.c
1314 --- linux-3.0.7/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.7/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.7/arch/parisc/kernel/sys_parisc.c linux-3.0.7/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.7/arch/parisc/kernel/traps.c linux-3.0.7/arch/parisc/kernel/traps.c
1447 --- linux-3.0.7/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.7/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.7/arch/parisc/mm/fault.c linux-3.0.7/arch/parisc/mm/fault.c
1461 --- linux-3.0.7/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.7/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
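
pax_handle_fetch_fault() above recognizes two fixed instruction sequences at the faulting PC, the unpatched PLT stub and the rt_sigreturn trampoline, by fetching each word with get_user() and comparing it against known parisc encodings, then rewriting the register file to emulate the sequence. Stripped of the architecture-specific opcodes and register fix-ups, the detection half follows this shape (a hedged sketch; the mask array is a generalization, since the hunk above mostly compares whole words):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Read nwords instruction words starting at pc and compare them against a
 * template, ignoring the bits cleared in mask[] (e.g. an immediate operand
 * that may legitimately vary). memcpy() stands in for get_user(), which can
 * additionally fault and must abort the match.
 */
static bool insn_sequence_matches(const void *pc, const uint32_t *tmpl,
				  const uint32_t *mask, size_t nwords)
{
	const unsigned char *p = pc;
	size_t i;

	for (i = 0; i < nwords; i++) {
		uint32_t insn;

		memcpy(&insn, p + i * sizeof(insn), sizeof(insn));
		if ((insn & mask[i]) != (tmpl[i] & mask[i]))
			return false;
	}
	return true;
}

On a match the real handler returns 3 (PLT) or 2 (sigreturn) so the fault path above resumes the task instead of killing it; anything unrecognized falls through to the default return of 1.
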
1632 diff -urNp linux-3.0.7/arch/powerpc/include/asm/elf.h linux-3.0.7/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
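
PAX_ELF_ET_DYN_BASE fixes the ET_DYN load base PaX randomizes from, and PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN give the number of random bits folded into the mmap and stack bases when the task runs with PaX randomization (MF_PAX_RANDMMAP). A sketch of how such a bit count is typically turned into an offset (the helper name and call site are assumptions; only the constants come from the hunk above):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed for the sketch; powerpc configs also use 14 or 16 */

/*
 * Turn a delta length in bits into a page-aligned random offset: keep only
 * delta_len bits of the random value, then shift by PAGE_SHIFT so the
 * randomization moves the base by whole pages.
 */
static unsigned long pax_delta(unsigned long rnd, unsigned int delta_len)
{
	return (rnd & ((1UL << delta_len) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	/* 28 bits of mmap delta (the 64-bit value above) spans 2^28 pages, i.e. 1 TiB at 4 KiB pages. */
	printf("max mmap delta: %#lx\n", pax_delta(~0UL, 28));
	return 0;
}

The same hunk drops the kernel's own randomize_et_dyn()/arch_randomize_brk() hooks, whose definitions are removed from process.c later in the patch, since PaX now owns that randomization.
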
1667 diff -urNp linux-3.0.7/arch/powerpc/include/asm/kmap_types.h linux-3.0.7/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.7/arch/powerpc/include/asm/mman.h linux-3.0.7/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.7/arch/powerpc/include/asm/page_64.h linux-3.0.7/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.7/arch/powerpc/include/asm/page.h linux-3.0.7/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.7/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.7/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
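
The page_64.h and page.h hunks above stop granting VM_EXEC unconditionally in the 32-bit default stack and data flags; it is now included only when the task's personality carries READ_IMPLIES_EXEC, so legacy binaries without a PT_GNU_STACK header keep their executable heap and stack while hardened ones lose it. The decision in isolation, with the relevant flag values inlined so the snippet stands alone:

#include <stdio.h>

#define VM_READ			0x00000001UL
#define VM_WRITE		0x00000002UL
#define VM_EXEC			0x00000004UL
#define VM_MAYREAD		0x00000010UL
#define VM_MAYWRITE		0x00000020UL
#define VM_MAYEXEC		0x00000040UL
#define READ_IMPLIES_EXEC	0x0400000	/* from linux/personality.h */

/* Default 32-bit data/stack flags: VM_EXEC only when the personality asks for it. */
static unsigned long vm_data_default_flags32(unsigned long personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
}

int main(void)
{
	printf("legacy:   %#lx\n", vm_data_default_flags32(READ_IMPLIES_EXEC));
	printf("hardened: %#lx\n", vm_data_default_flags32(0));
	return 0;
}
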
1739 diff -urNp linux-3.0.7/arch/powerpc/include/asm/pgtable.h linux-3.0.7/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.7/arch/powerpc/include/asm/reg.h linux-3.0.7/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.7/arch/powerpc/include/asm/system.h linux-3.0.7/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.7/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.7/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.7/arch/powerpc/include/asm/uaccess.h linux-3.0.7/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
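
The copy_from_user()/copy_to_user() variants added above perform two checks before touching any data: a sign test on the length, which catches callers whose signed byte count went negative and then widened into an enormous unsigned value, and check_object_size() (declared at the top of this header), which lets the object-size checker reject copies that would overrun a slab or stack object when the length is not a compile-time constant. A small userspace demonstration of the failure mode the (long)n < 0 guard targets:

#include <stdio.h>

int main(void)
{
	int want = 16, have = 32;
	int len = want - have;			/* a buggy caller computes -16 */
	unsigned long n = (unsigned long)len;	/* widens to 0xfffffffffffffff0 on 64-bit */

	printf("n = %#lx, caught by (long)n < 0: %s\n",
	       n, (long)n < 0 ? "yes" : "no");
	return 0;
}

Returning n unchanged in that case reports every byte as uncopied, which callers already treat as a fault; the matching out-of-line definitions are dropped from lib/usercopy_64.c later in the patch because the checks now live in these inline versions.
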
1964 diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.7/arch/powerpc/kernel/module_32.c linux-3.0.7/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.7/arch/powerpc/kernel/module.c linux-3.0.7/arch/powerpc/kernel/module.c
2033 --- linux-3.0.7/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.7/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
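
Under CONFIG_PAX_KERNEXEC the hunk above splits module allocation into two pools: module_alloc() now returns plain vmalloc() memory for data, while module_alloc_exec() hands out vmalloc_exec() memory for code, matching the module_core_rw/module_core_rx and core_size_rw/core_size_rx fields used throughout the rest of the patch. A sketch of the resulting bookkeeping (the struct below is an illustration, not the patched struct module):

#include <stddef.h>
#include <stdlib.h>

/* Illustrative split of a module's core area into RX and RW halves. */
struct module_layout {
	void *core_rx;		/* code and PLT stubs: mapped read + execute */
	size_t core_size_rx;
	void *core_rw;		/* data and GOT entries: mapped read + write */
	size_t core_size_rw;
};

static int layout_alloc(struct module_layout *l, size_t rx, size_t rw)
{
	/* In the kernel these come from vmalloc_exec() and vmalloc(). */
	l->core_rx = malloc(rx);
	l->core_rw = malloc(rw);
	if (!l->core_rx || !l->core_rw) {
		free(l->core_rx);
		free(l->core_rw);
		return -1;
	}
	l->core_size_rx = rx;
	l->core_size_rw = rw;
	return 0;
}

Relocation code then has to pick the half a symbol lives in, which is what the module_32.c hunk above and the s390 module.c hunks later in the patch do when they place GOT entries off core_rw and PLT stubs off core_rx.
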
2074 diff -urNp linux-3.0.7/arch/powerpc/kernel/process.c linux-3.0.7/arch/powerpc/kernel/process.c
2075 --- linux-3.0.7/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.7/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_32.c linux-3.0.7/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_64.c linux-3.0.7/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.7/arch/powerpc/kernel/traps.c linux-3.0.7/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.7/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.7/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.7/arch/powerpc/kernel/vdso.c linux-3.0.7/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.7/arch/powerpc/lib/usercopy_64.c linux-3.0.7/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.7/arch/powerpc/mm/fault.c linux-3.0.7/arch/powerpc/mm/fault.c
2278 --- linux-3.0.7/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.7/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
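
On powerpc the execute-fault reporting rests on two small changes above: the mask applied to error_code for 0x400 (instruction storage) faults grows from 0x48200000 to 0x58200000 so the new DSISR_GUARDED bit (0x10000000, added to reg.h earlier in this patch) is preserved, and the classic-MMU checks then treat DSISR_GUARDED like DSISR_PROTFAULT. The mask change is just that one extra bit ORed in:

#include <stdio.h>

#define DSISR_GUARDED	0x10000000	/* fetch from guarded (no-execute) storage */

int main(void)
{
	unsigned int old_mask = 0x48200000;	/* bits previously kept for 0x400 faults */
	unsigned int new_mask = old_mask | DSISR_GUARDED;

	printf("%#x -> %#x\n", old_mask, new_mask);	/* prints 0x48200000 -> 0x58200000 */
	return 0;
}
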
2384 diff -urNp linux-3.0.7/arch/powerpc/mm/mmap_64.c linux-3.0.7/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
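
arch_pick_mmap_layout() above (and its s390 twin later in the patch) applies PaX randomization by shifting the chosen mmap base: upwards by delta_mmap in the legacy bottom-up layout, and downwards by delta_mmap + delta_stack in the top-down layout so the randomized stack and the mmap area cannot collide. The adjustment on its own (TASK_UNMAPPED_BASE and the delta values below are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TASK_UNMAPPED_BASE	0x0000100000000000UL	/* illustrative */

static unsigned long pick_mmap_base(bool legacy, unsigned long top_down_base,
				    unsigned long delta_mmap, unsigned long delta_stack)
{
	if (legacy)
		return TASK_UNMAPPED_BASE + delta_mmap;		/* grow-up layout */
	return top_down_base - (delta_mmap + delta_stack);	/* grow-down layout */
}

int main(void)
{
	printf("legacy:  %#lx\n", pick_mmap_base(true,  0, 0x1000000, 0));
	printf("topdown: %#lx\n", pick_mmap_base(false, 0x00007fff00000000UL,
						 0x1000000, 0x200000));
	return 0;
}
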
2410 diff -urNp linux-3.0.7/arch/powerpc/mm/slice.c linux-3.0.7/arch/powerpc/mm/slice.c
2411 --- linux-3.0.7/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.7/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.7/arch/s390/include/asm/elf.h linux-3.0.7/arch/s390/include/asm/elf.h
2480 --- linux-3.0.7/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.7/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.7/arch/s390/include/asm/system.h linux-3.0.7/arch/s390/include/asm/system.h
2508 --- linux-3.0.7/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.7/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.7/arch/s390/include/asm/uaccess.h linux-3.0.7/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.7/arch/s390/kernel/module.c linux-3.0.7/arch/s390/kernel/module.c
2555 --- linux-3.0.7/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.7/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.7/arch/s390/kernel/process.c linux-3.0.7/arch/s390/kernel/process.c
2629 --- linux-3.0.7/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.7/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.7/arch/s390/kernel/setup.c linux-3.0.7/arch/s390/kernel/setup.c
2672 --- linux-3.0.7/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.7/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.7/arch/s390/mm/mmap.c linux-3.0.7/arch/s390/mm/mmap.c
2684 --- linux-3.0.7/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.7/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.7/arch/score/include/asm/system.h linux-3.0.7/arch/score/include/asm/system.h
2733 --- linux-3.0.7/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.7/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.7/arch/score/kernel/process.c linux-3.0.7/arch/score/kernel/process.c
2745 --- linux-3.0.7/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.7/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.7/arch/sh/mm/mmap.c linux-3.0.7/arch/sh/mm/mmap.c
2757 --- linux-3.0.7/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.7/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
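
The sh top-down hunk above (like the powerpc slice.c one before it) restructures the search loop: instead of stepping to vma->vm_start - len, the next candidate comes from skip_heap_stack_gap(), which is assumed to return the highest start address below the VMA that still leaves room for len plus any required stack gap, or an error value once the search runs out of address space, hence the IS_ERR_VALUE() loop condition. A userspace model of that assumed helper, paired with the check from the earlier sketch:

#include <stdbool.h>

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;		/* VM_GROWSDOWN marks a stack mapping */
};

#define VM_GROWSDOWN	0x00000100UL
#define HEAP_STACK_GAP	(64UL * 1024)	/* illustrative */
#define ENOMEM_ADDR	(~0UL)		/* stands in for an IS_ERR_VALUE() error */

/*
 * Model of skip_heap_stack_gap(): propose the highest start address below
 * this VMA that keeps len bytes (plus a guard gap below stacks) free, or an
 * error value if the VMA sits too low for such an address to exist.
 */
static unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long len)
{
	unsigned long gap = (vma->vm_flags & VM_GROWSDOWN) ? HEAP_STACK_GAP : 0;

	if (vma->vm_start < len + gap)
		return ENOMEM_ADDR;
	return vma->vm_start - len - gap;
}
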
2833 diff -urNp linux-3.0.7/arch/sparc/include/asm/atomic_64.h linux-3.0.7/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
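The atomic_64.h hunks above split sparc64's atomics into two flavours: the regular atomic_t operations gain an overflow check (the add/sub becomes addcc/subcc followed by a conditional trap under CONFIG_PAX_REFCOUNT), while the new *_unchecked operations on atomic_unchecked_t keep the old wrap-around behaviour for counters where wrapping is intended. A minimal userspace analogue of that checked/unchecked split, with illustrative names only (this is not the kernel code), could look like:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } counter_checked_t;
typedef struct { volatile int counter; } counter_unchecked_t;

/* Checked flavour: refuse to wrap, mirroring what addcc + "tvs %icc, 6"
 * does in the patched atomics (there the trap kills the offender). */
static void counter_inc_checked(counter_checked_t *v)
{
        int old = v->counter, new_val;
        if (__builtin_add_overflow(old, 1, &new_val))
                __builtin_trap();
        v->counter = new_val;
}

/* Unchecked flavour: plain wrap-around, as before the patch
 * (the add is done in unsigned arithmetic so the wrap is well defined). */
static void counter_inc_unchecked(counter_unchecked_t *v)
{
        v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
        counter_unchecked_t u = { INT_MAX };
        counter_inc_unchecked(&u);              /* silently wraps to INT_MIN */
        printf("unchecked wrapped to %d\n", u.counter);
        fflush(stdout);

        counter_checked_t c = { INT_MAX };
        counter_inc_checked(&c);                /* traps here instead of wrapping */
        return 0;
}
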
3028 diff -urNp linux-3.0.7/arch/sparc/include/asm/cache.h linux-3.0.7/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.7/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.7/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_32.h linux-3.0.7/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_64.h linux-3.0.7/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059 +++ linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
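The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values above are bit counts: in PaX they are used as the number of page-granularity bits that get randomized when the per-mm deltas are generated (that step is not in this excerpt; the resulting mm->delta_mmap / mm->delta_stack offsets are applied in the sys_sparc_64.c hunks further down). A rough worked example of the resulting randomization span, assuming 8 KiB base pages on sparc64 (an assumption for illustration):

#include <stdio.h>

int main(void)
{
        const unsigned long long page_shift = 13;       /* assumed 8 KiB pages */
        /* 28 randomized page bits for 64-bit tasks, 14 for 32-bit tasks */
        printf("64-bit mmap span: %llu GiB\n", ((1ULL << 28) << page_shift) >> 30);
        printf("32-bit mmap span: %llu MiB\n", ((1ULL << 14) << page_shift) >> 20);
        return 0;
}
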
3074 diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtable_32.h linux-3.0.7/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.7/arch/sparc/include/asm/spinlock_64.h linux-3.0.7/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:54:53.000000000 -0400
3127 +++ linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:55:27.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_32.h linux-3.0.7/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_64.h linux-3.0.7/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_32.h linux-3.0.7/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_64.h linux-3.0.7/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess.h linux-3.0.7/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
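The uaccess changes above add the usercopy hardening checks: copy_to_user/copy_from_user first reject any size whose sign bit is set (a huge unsigned length that almost certainly came from a signed underflow), and for sizes that are not compile-time constants they call check_object_size() so the source or destination can be validated against the bounds of the containing kernel object (the checker itself is declared here and implemented outside this excerpt). A minimal userspace sketch of the same shape, with a stubbed-out checker and illustrative names, could be:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stub: the real checker verifies [ptr, ptr+n) stays inside one object. */
static void check_object_size(const void *ptr, unsigned long n, bool to)
{
        (void)ptr; (void)n; (void)to;
}

/* Mirrors the kernel convention: return the number of bytes NOT copied. */
static unsigned long copy_checked(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)                /* underflowed length: refuse entirely */
                return n;
        check_object_size(from, n, true);
        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];
        unsigned long bad = (unsigned long)-4;  /* e.g. the result of 0 - 4 */
        printf("good copy left %lu bytes\n", copy_checked(dst, src, sizeof(src)));
        printf("bad length left %lu bytes (rejected)\n", copy_checked(dst, src, bad));
        return 0;
}
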
3365 diff -urNp linux-3.0.7/arch/sparc/kernel/Makefile linux-3.0.7/arch/sparc/kernel/Makefile
3366 --- linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:54:53.000000000 -0400
3367 +++ linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:55:27.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.7/arch/sparc/kernel/process_32.c linux-3.0.7/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.7/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.7/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.7/arch/sparc/kernel/process_64.c linux-3.0.7/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.7/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.7/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
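Throughout the arch_get_unmapped_area hunks above, the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap(vma, addr, len) (and the top-down walk by skip_heap_stack_gap). The idea is that "fits below the next VMA" is no longer enough: when the next VMA can grow down (the stack), the candidate mapping must also leave a guard gap so heap and stack cannot grow into each other. The real helper is added elsewhere in the patch; the sketch below only illustrates the shape of such a predicate, with a made-up struct and gap value:

#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP       (256UL * 4096UL)        /* illustrative gap, not the kernel's value */
#define VM_GROWSDOWN    0x0100UL

struct vma {                                    /* stripped-down stand-in for vm_area_struct */
        unsigned long vm_start;
        unsigned long vm_flags;
};

/* true if [addr, addr+len) fits below vma and, for a grows-down VMA,
 * also leaves GUARD_GAP bytes of slack before vm_start. */
static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;                    /* nothing above: always fits */
        if (addr + len > vma->vm_start)
                return false;                   /* overlaps the next mapping */
        if (vma->vm_flags & VM_GROWSDOWN)
                return vma->vm_start - (addr + len) >= GUARD_GAP;
        return true;
}

int main(void)
{
        struct vma stack = { .vm_start = 0x7f0000000000UL, .vm_flags = VM_GROWSDOWN };
        printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x1000));  /* too close: 0 */
        printf("%d\n", gap_ok(&stack, 0x7e0000000000UL, 0x1000));  /* plenty of room: 1 */
        return 0;
}
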
3624 diff -urNp linux-3.0.7/arch/sparc/kernel/traps_32.c linux-3.0.7/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.7/arch/sparc/kernel/traps_64.c linux-3.0.7/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.7/arch/sparc/kernel/unaligned_64.c linux-3.0.7/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798 +++ linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.7/arch/sparc/lib/atomic_64.S linux-3.0.7/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
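In every atomic_64.S fast path above, the plain add/sub becomes addcc/subcc so the condition codes are set, and under CONFIG_PAX_REFCOUNT a "tvs %icc, 6" (or "%xcc, 6" for the 64-bit ops) follows: trap on overflow into software trap 6, which the traps_64.c hunks earlier in this patch route to pax_report_refcount_overflow(). The condition the V flag encodes is ordinary two's-complement signed overflow; expressed portably in C (illustrative, userspace) it is simply:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Equivalent to the V flag set by addcc: overflow iff both operands have
 * the same sign and the true sum does not fit in a signed long. */
static bool add_overflows(long a, long b)
{
        long r;
        return __builtin_add_overflow(a, b, &r);
}

int main(void)
{
        printf("%d %d\n", add_overflows(LONG_MAX, 1),   /* 1: would trap */
                          add_overflows(-1, 1));        /* 0: fine */
        return 0;
}
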
4045 diff -urNp linux-3.0.7/arch/sparc/lib/ksyms.c linux-3.0.7/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.7/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.7/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.7/arch/sparc/lib/Makefile linux-3.0.7/arch/sparc/lib/Makefile
4068 --- linux-3.0.7/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069 +++ linux-3.0.7/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.7/arch/sparc/Makefile linux-3.0.7/arch/sparc/Makefile
4080 --- linux-3.0.7/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.7/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.7/arch/sparc/mm/fault_32.c linux-3.0.7/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.7/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.7/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
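The pax_handle_fetch_fault() logic added above (and repeated for sparc64 in the fault_64.c hunk that follows) recognises the code sequences the linker and glibc emit for PLT slots by matching instruction encodings directly: "(insn & 0xFFC00000U) == 0x03000000U" matches "sethi %hi(X), %g1", "(insn & 0xFFFFE000U) == 0x81C06000U" matches "jmpl %g1 + simm13, %g0", and 0x01000000U is a nop. The immediate of a matched sethi is its low 22 bits shifted left by 10, which is exactly what "(sethi & 0x003FFFFFU) << 10" recovers. A tiny standalone decoder for that one pattern (illustrative only) is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* If insn is "sethi %hi(X), %g1", recover the immediate shifted into place
 * (X with its low 10 bits cleared); same masks as the patch uses. */
static bool decode_sethi_g1(uint32_t insn, uint32_t *imm)
{
        if ((insn & 0xFFC00000U) != 0x03000000U)
                return false;
        *imm = (insn & 0x003FFFFFU) << 10;
        return true;
}

int main(void)
{
        uint32_t imm;
        /* sethi %hi(0x12345400), %g1  ==  0x03000000 | (0x12345400 >> 10) */
        uint32_t insn = 0x03000000U | (0x12345400U >> 10);
        if (decode_sethi_g1(insn, &imm))
                printf("hi22 = 0x%08x\n", imm);         /* prints 0x12345400 */
        return 0;
}
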
4398 diff -urNp linux-3.0.7/arch/sparc/mm/fault_64.c linux-3.0.7/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.7/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.7/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.7/arch/sparc/mm/hugetlbpage.c linux-3.0.7/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.7/arch/sparc/mm/init_32.c linux-3.0.7/arch/sparc/mm/init_32.c
4971 --- linux-3.0.7/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.7/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.7/arch/sparc/mm/Makefile linux-3.0.7/arch/sparc/mm/Makefile
5008 --- linux-3.0.7/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.7/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.7/arch/sparc/mm/srmmu.c linux-3.0.7/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.7/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.7/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.7/arch/um/include/asm/kmap_types.h linux-3.0.7/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.7/arch/um/include/asm/page.h linux-3.0.7/arch/um/include/asm/page.h
5048 --- linux-3.0.7/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.7/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.7/arch/um/kernel/process.c linux-3.0.7/arch/um/kernel/process.c
5061 --- linux-3.0.7/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.7/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.7/arch/um/sys-i386/syscalls.c linux-3.0.7/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.7/arch/x86/boot/bitops.h linux-3.0.7/arch/x86/boot/bitops.h
5112 --- linux-3.0.7/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.7/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.7/arch/x86/boot/boot.h linux-3.0.7/arch/x86/boot/boot.h
5133 --- linux-3.0.7/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.7/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_32.S linux-3.0.7/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_64.S linux-3.0.7/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.7/arch/x86/boot/compressed/Makefile linux-3.0.7/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.7/arch/x86/boot/compressed/misc.c linux-3.0.7/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.7/arch/x86/boot/compressed/relocs.c linux-3.0.7/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.7/arch/x86/boot/cpucheck.c linux-3.0.7/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.7/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.7/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.7/arch/x86/boot/header.S linux-3.0.7/arch/x86/boot/header.S
5533 --- linux-3.0.7/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.7/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.7/arch/x86/boot/Makefile linux-3.0.7/arch/x86/boot/Makefile
5545 --- linux-3.0.7/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.7/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.7/arch/x86/boot/memory.c linux-3.0.7/arch/x86/boot/memory.c
5558 --- linux-3.0.7/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.7/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.7/arch/x86/boot/video.c linux-3.0.7/arch/x86/boot/video.c
5570 --- linux-3.0.7/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.7/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.7/arch/x86/boot/video-vesa.c linux-3.0.7/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.7/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.7/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S
5593 --- linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5595 @@ -8,6 +8,8 @@
5596 * including this sentence is retained in full.
5597 */
5598
5599 +#include <asm/alternative-asm.h>
5600 +
5601 .extern crypto_ft_tab
5602 .extern crypto_it_tab
5603 .extern crypto_fl_tab
5604 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5605 je B192; \
5606 leaq 32(r9),r9;
5607
5608 +#define ret pax_force_retaddr; ret
5609 +
5610 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5611 movq r1,r2; \
5612 movq r3,r4; \
5613 diff -urNp linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S
5614 --- linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5615 +++ linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5616 @@ -1,3 +1,5 @@
5617 +#include <asm/alternative-asm.h>
5618 +
5619 # enter ECRYPT_encrypt_bytes
5620 .text
5621 .p2align 5
5622 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5623 add %r11,%rsp
5624 mov %rdi,%rax
5625 mov %rsi,%rdx
5626 + pax_force_retaddr
5627 ret
5628 # bytesatleast65:
5629 ._bytesatleast65:
5630 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
5631 add %r11,%rsp
5632 mov %rdi,%rax
5633 mov %rsi,%rdx
5634 + pax_force_retaddr
5635 ret
5636 # enter ECRYPT_ivsetup
5637 .text
5638 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5639 add %r11,%rsp
5640 mov %rdi,%rax
5641 mov %rsi,%rdx
5642 + pax_force_retaddr
5643 ret
5644 diff -urNp linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S
5645 --- linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5646 +++ linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5647 @@ -21,6 +21,7 @@
5648 .text
5649
5650 #include <asm/asm-offsets.h>
5651 +#include <asm/alternative-asm.h>
5652
5653 #define a_offset 0
5654 #define b_offset 4
5655 @@ -269,6 +270,7 @@ twofish_enc_blk:
5656
5657 popq R1
5658 movq $1,%rax
5659 + pax_force_retaddr
5660 ret
5661
5662 twofish_dec_blk:
5663 @@ -321,4 +323,5 @@ twofish_dec_blk:
5664
5665 popq R1
5666 movq $1,%rax
5667 + pax_force_retaddr
5668 ret
5669 diff -urNp linux-3.0.7/arch/x86/ia32/ia32_aout.c linux-3.0.7/arch/x86/ia32/ia32_aout.c
5670 --- linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5671 +++ linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5672 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5673 unsigned long dump_start, dump_size;
5674 struct user32 dump;
5675
5676 + memset(&dump, 0, sizeof(dump));
5677 +
5678 fs = get_fs();
5679 set_fs(KERNEL_DS);
5680 has_dumped = 1;
5681 diff -urNp linux-3.0.7/arch/x86/ia32/ia32entry.S linux-3.0.7/arch/x86/ia32/ia32entry.S
5682 --- linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5683 +++ linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-10-11 10:44:33.000000000 -0400
5684 @@ -13,7 +13,9 @@
5685 #include <asm/thread_info.h>
5686 #include <asm/segment.h>
5687 #include <asm/irqflags.h>
5688 +#include <asm/pgtable.h>
5689 #include <linux/linkage.h>
5690 +#include <asm/alternative-asm.h>
5691
5692 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5693 #include <linux/elf-em.h>
5694 @@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5695 ENDPROC(native_irq_enable_sysexit)
5696 #endif
5697
5698 + .macro pax_enter_kernel_user
5699 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5700 + call pax_enter_kernel_user
5701 +#endif
5702 + .endm
5703 +
5704 + .macro pax_exit_kernel_user
5705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5706 + call pax_exit_kernel_user
5707 +#endif
5708 +#ifdef CONFIG_PAX_RANDKSTACK
5709 + pushq %rax
5710 + call pax_randomize_kstack
5711 + popq %rax
5712 +#endif
5713 + .endm
5714 +
5715 + .macro pax_erase_kstack
5716 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5717 + call pax_erase_kstack
5718 +#endif
5719 + .endm
5720 +
5721 /*
5722 * 32bit SYSENTER instruction entry.
5723 *
5724 @@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5725 CFI_REGISTER rsp,rbp
5726 SWAPGS_UNSAFE_STACK
5727 movq PER_CPU_VAR(kernel_stack), %rsp
5728 - addq $(KERNEL_STACK_OFFSET),%rsp
5729 + pax_enter_kernel_user
5730 /*
5731 * No need to follow this irqs on/off section: the syscall
5732 * disabled irqs, here we enable it straight after entry:
5733 @@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REL_OFFSET rsp,0
5735 pushfq_cfi
5736 /*CFI_REL_OFFSET rflags,0*/
5737 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5738 + GET_THREAD_INFO(%r10)
5739 + movl TI_sysenter_return(%r10), %r10d
5740 CFI_REGISTER rip,r10
5741 pushq_cfi $__USER32_CS
5742 /*CFI_REL_OFFSET cs,0*/
5743 @@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5744 SAVE_ARGS 0,0,1
5745 /* no need to do an access_ok check here because rbp has been
5746 32bit zero extended */
5747 +
5748 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5749 + mov $PAX_USER_SHADOW_BASE,%r10
5750 + add %r10,%rbp
5751 +#endif
5752 +
5753 1: movl (%rbp),%ebp
5754 .section __ex_table,"a"
5755 .quad 1b,ia32_badarg
5756 @@ -168,6 +200,8 @@ sysenter_dispatch:
5757 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5758 jnz sysexit_audit
5759 sysexit_from_sys_call:
5760 + pax_exit_kernel_user
5761 + pax_erase_kstack
5762 andl $~TS_COMPAT,TI_status(%r10)
5763 /* clear IF, that popfq doesn't enable interrupts early */
5764 andl $~0x200,EFLAGS-R11(%rsp)
5765 @@ -194,6 +228,9 @@ sysexit_from_sys_call:
5766 movl %eax,%esi /* 2nd arg: syscall number */
5767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5768 call audit_syscall_entry
5769 +
5770 + pax_erase_kstack
5771 +
5772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5773 cmpq $(IA32_NR_syscalls-1),%rax
5774 ja ia32_badsys
5775 @@ -246,6 +283,9 @@ sysenter_tracesys:
5776 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5777 movq %rsp,%rdi /* &pt_regs -> arg1 */
5778 call syscall_trace_enter
5779 +
5780 + pax_erase_kstack
5781 +
5782 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5783 RESTORE_REST
5784 cmpq $(IA32_NR_syscalls-1),%rax
5785 @@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5786 ENTRY(ia32_cstar_target)
5787 CFI_STARTPROC32 simple
5788 CFI_SIGNAL_FRAME
5789 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5790 + CFI_DEF_CFA rsp,0
5791 CFI_REGISTER rip,rcx
5792 /*CFI_REGISTER rflags,r11*/
5793 SWAPGS_UNSAFE_STACK
5794 movl %esp,%r8d
5795 CFI_REGISTER rsp,r8
5796 movq PER_CPU_VAR(kernel_stack),%rsp
5797 +
5798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5799 + pax_enter_kernel_user
5800 +#endif
5801 +
5802 /*
5803 * No need to follow this irqs on/off section: the syscall
5804 * disabled irqs and here we enable it straight after entry:
5805 */
5806 ENABLE_INTERRUPTS(CLBR_NONE)
5807 - SAVE_ARGS 8,1,1
5808 + SAVE_ARGS 8*6,1,1
5809 movl %eax,%eax /* zero extension */
5810 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5811 movq %rcx,RIP-ARGOFFSET(%rsp)
5812 @@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5813 /* no need to do an access_ok check here because r8 has been
5814 32bit zero extended */
5815 /* hardware stack frame is complete now */
5816 +
5817 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5818 + mov $PAX_USER_SHADOW_BASE,%r10
5819 + add %r10,%r8
5820 +#endif
5821 +
5822 1: movl (%r8),%r9d
5823 .section __ex_table,"a"
5824 .quad 1b,ia32_badarg
5825 @@ -327,6 +378,8 @@ cstar_dispatch:
5826 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5827 jnz sysretl_audit
5828 sysretl_from_sys_call:
5829 + pax_exit_kernel_user
5830 + pax_erase_kstack
5831 andl $~TS_COMPAT,TI_status(%r10)
5832 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5833 movl RIP-ARGOFFSET(%rsp),%ecx
5834 @@ -364,6 +417,9 @@ cstar_tracesys:
5835 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5836 movq %rsp,%rdi /* &pt_regs -> arg1 */
5837 call syscall_trace_enter
5838 +
5839 + pax_erase_kstack
5840 +
5841 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5842 RESTORE_REST
5843 xchgl %ebp,%r9d
5844 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5845 CFI_REL_OFFSET rip,RIP-RIP
5846 PARAVIRT_ADJUST_EXCEPTION_FRAME
5847 SWAPGS
5848 + pax_enter_kernel_user
5849 /*
5850 * No need to follow this irqs on/off section: the syscall
5851 * disabled irqs and here we enable it straight after entry:
5852 @@ -441,6 +498,9 @@ ia32_tracesys:
5853 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5854 movq %rsp,%rdi /* &pt_regs -> arg1 */
5855 call syscall_trace_enter
5856 +
5857 + pax_erase_kstack
5858 +
5859 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5860 RESTORE_REST
5861 cmpq $(IA32_NR_syscalls-1),%rax
5862 @@ -455,6 +515,7 @@ ia32_badsys:
5863
5864 quiet_ni_syscall:
5865 movq $-ENOSYS,%rax
5866 + pax_force_retaddr
5867 ret
5868 CFI_ENDPROC
5869
5870 diff -urNp linux-3.0.7/arch/x86/ia32/ia32_signal.c linux-3.0.7/arch/x86/ia32/ia32_signal.c
5871 --- linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5872 +++ linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5873 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5874 }
5875 seg = get_fs();
5876 set_fs(KERNEL_DS);
5877 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5878 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5879 set_fs(seg);
5880 if (ret >= 0 && uoss_ptr) {
5881 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5882 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5883 */
5884 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5885 size_t frame_size,
5886 - void **fpstate)
5887 + void __user **fpstate)
5888 {
5889 unsigned long sp;
5890
5891 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5892
5893 if (used_math()) {
5894 sp = sp - sig_xstate_ia32_size;
5895 - *fpstate = (struct _fpstate_ia32 *) sp;
5896 + *fpstate = (struct _fpstate_ia32 __user *) sp;
5897 if (save_i387_xstate_ia32(*fpstate) < 0)
5898 return (void __user *) -1L;
5899 }
5900 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5901 sp -= frame_size;
5902 /* Align the stack pointer according to the i386 ABI,
5903 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5904 - sp = ((sp + 4) & -16ul) - 4;
5905 + sp = ((sp - 12) & -16ul) - 4;
5906 return (void __user *) sp;
5907 }
5908
5909 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5910 * These are actually not used anymore, but left because some
5911 * gdb versions depend on them as a marker.
5912 */
5913 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5914 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5915 } put_user_catch(err);
5916
5917 if (err)
5918 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5919 0xb8,
5920 __NR_ia32_rt_sigreturn,
5921 0x80cd,
5922 - 0,
5923 + 0
5924 };
5925
5926 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5927 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5928
5929 if (ka->sa.sa_flags & SA_RESTORER)
5930 restorer = ka->sa.sa_restorer;
5931 + else if (current->mm->context.vdso)
5932 + /* Return stub is in 32bit vsyscall page */
5933 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5934 else
5935 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5936 - rt_sigreturn);
5937 + restorer = &frame->retcode;
5938 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5939
5940 /*
5941 * Not actually used anymore, but left because some gdb
5942 * versions need it.
5943 */
5944 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5945 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5946 } put_user_catch(err);
5947
5948 if (err)
5949 diff -urNp linux-3.0.7/arch/x86/ia32/sys_ia32.c linux-3.0.7/arch/x86/ia32/sys_ia32.c
5950 --- linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5951 +++ linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5952 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5953 */
5954 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5955 {
5956 - typeof(ubuf->st_uid) uid = 0;
5957 - typeof(ubuf->st_gid) gid = 0;
5958 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
5959 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
5960 SET_UID(uid, stat->uid);
5961 SET_GID(gid, stat->gid);
5962 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5963 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5964 }
5965 set_fs(KERNEL_DS);
5966 ret = sys_rt_sigprocmask(how,
5967 - set ? (sigset_t __user *)&s : NULL,
5968 - oset ? (sigset_t __user *)&s : NULL,
5969 + set ? (sigset_t __force_user *)&s : NULL,
5970 + oset ? (sigset_t __force_user *)&s : NULL,
5971 sigsetsize);
5972 set_fs(old_fs);
5973 if (ret)
5974 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5975 return alarm_setitimer(seconds);
5976 }
5977
5978 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5979 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5980 int options)
5981 {
5982 return compat_sys_wait4(pid, stat_addr, options, NULL);
5983 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5984 mm_segment_t old_fs = get_fs();
5985
5986 set_fs(KERNEL_DS);
5987 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5988 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5989 set_fs(old_fs);
5990 if (put_compat_timespec(&t, interval))
5991 return -EFAULT;
5992 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
5997 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
5998 set_fs(old_fs);
5999 if (!ret) {
6000 switch (_NSIG_WORDS) {
6001 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6002 if (copy_siginfo_from_user32(&info, uinfo))
6003 return -EFAULT;
6004 set_fs(KERNEL_DS);
6005 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6006 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6007 set_fs(old_fs);
6008 return ret;
6009 }
6010 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6011 return -EFAULT;
6012
6013 set_fs(KERNEL_DS);
6014 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6015 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6016 count);
6017 set_fs(old_fs);
6018
6019 diff -urNp linux-3.0.7/arch/x86/include/asm/alternative-asm.h linux-3.0.7/arch/x86/include/asm/alternative-asm.h
6020 --- linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6021 +++ linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6022 @@ -15,6 +15,20 @@
6023 .endm
6024 #endif
6025
6026 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6027 + .macro pax_force_retaddr rip=0
6028 + btsq $63,\rip(%rsp)
6029 + .endm
6030 + .macro pax_force_fptr ptr
6031 + btsq $63,\ptr
6032 + .endm
6033 +#else
6034 + .macro pax_force_retaddr rip=0
6035 + .endm
6036 + .macro pax_force_fptr ptr
6037 + .endm
6038 +#endif
6039 +
6040 .macro altinstruction_entry orig alt feature orig_len alt_len
6041 .align 8
6042 .quad \orig
6043 diff -urNp linux-3.0.7/arch/x86/include/asm/alternative.h linux-3.0.7/arch/x86/include/asm/alternative.h
6044 --- linux-3.0.7/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6045 +++ linux-3.0.7/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6046 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6047 ".section .discard,\"aw\",@progbits\n" \
6048 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6049 ".previous\n" \
6050 - ".section .altinstr_replacement, \"ax\"\n" \
6051 + ".section .altinstr_replacement, \"a\"\n" \
6052 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6053 ".previous"
6054
6055 diff -urNp linux-3.0.7/arch/x86/include/asm/apic.h linux-3.0.7/arch/x86/include/asm/apic.h
6056 --- linux-3.0.7/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6057 +++ linux-3.0.7/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6058 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6059
6060 #ifdef CONFIG_X86_LOCAL_APIC
6061
6062 -extern unsigned int apic_verbosity;
6063 +extern int apic_verbosity;
6064 extern int local_apic_timer_c2_ok;
6065
6066 extern int disable_apic;
6067 diff -urNp linux-3.0.7/arch/x86/include/asm/apm.h linux-3.0.7/arch/x86/include/asm/apm.h
6068 --- linux-3.0.7/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6069 +++ linux-3.0.7/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6070 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6071 __asm__ __volatile__(APM_DO_ZERO_SEGS
6072 "pushl %%edi\n\t"
6073 "pushl %%ebp\n\t"
6074 - "lcall *%%cs:apm_bios_entry\n\t"
6075 + "lcall *%%ss:apm_bios_entry\n\t"
6076 "setc %%al\n\t"
6077 "popl %%ebp\n\t"
6078 "popl %%edi\n\t"
6079 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083 - "lcall *%%cs:apm_bios_entry\n\t"
6084 + "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%bl\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088 diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_32.h linux-3.0.7/arch/x86/include/asm/atomic64_32.h
6089 --- linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6090 +++ linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6091 @@ -12,6 +12,14 @@ typedef struct {
6092 u64 __aligned(8) counter;
6093 } atomic64_t;
6094
6095 +#ifdef CONFIG_PAX_REFCOUNT
6096 +typedef struct {
6097 + u64 __aligned(8) counter;
6098 +} atomic64_unchecked_t;
6099 +#else
6100 +typedef atomic64_t atomic64_unchecked_t;
6101 +#endif
6102 +
6103 #define ATOMIC64_INIT(val) { (val) }
6104
6105 #ifdef CONFIG_X86_CMPXCHG64
6106 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6107 }
6108
6109 /**
6110 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6111 + * @p: pointer to type atomic64_unchecked_t
6112 + * @o: expected value
6113 + * @n: new value
6114 + *
6115 + * Atomically sets @v to @n if it was equal to @o and returns
6116 + * the old value.
6117 + */
6118 +
6119 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6120 +{
6121 + return cmpxchg64(&v->counter, o, n);
6122 +}
6123 +
6124 +/**
6125 * atomic64_xchg - xchg atomic64 variable
6126 * @v: pointer to type atomic64_t
6127 * @n: value to assign
6128 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6129 }
6130
6131 /**
6132 + * atomic64_set_unchecked - set atomic64 variable
6133 + * @v: pointer to type atomic64_unchecked_t
6134 + * @n: value to assign
6135 + *
6136 + * Atomically sets the value of @v to @n.
6137 + */
6138 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6139 +{
6140 + unsigned high = (unsigned)(i >> 32);
6141 + unsigned low = (unsigned)i;
6142 + asm volatile(ATOMIC64_ALTERNATIVE(set)
6143 + : "+b" (low), "+c" (high)
6144 + : "S" (v)
6145 + : "eax", "edx", "memory"
6146 + );
6147 +}
6148 +
6149 +/**
6150 * atomic64_read - read atomic64 variable
6151 * @v: pointer to type atomic64_t
6152 *
6153 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6154 }
6155
6156 /**
6157 + * atomic64_read_unchecked - read atomic64 variable
6158 + * @v: pointer to type atomic64_unchecked_t
6159 + *
6160 + * Atomically reads the value of @v and returns it.
6161 + */
6162 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6163 +{
6164 + long long r;
6165 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6166 + : "=A" (r), "+c" (v)
6167 + : : "memory"
6168 + );
6169 + return r;
6170 + }
6171 +
6172 +/**
6173 * atomic64_add_return - add and return
6174 * @i: integer value to add
6175 * @v: pointer to type atomic64_t
6176 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6177 return i;
6178 }
6179
6180 +/**
6181 + * atomic64_add_return_unchecked - add and return
6182 + * @i: integer value to add
6183 + * @v: pointer to type atomic64_unchecked_t
6184 + *
6185 + * Atomically adds @i to @v and returns @i + *@v
6186 + */
6187 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6188 +{
6189 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6190 + : "+A" (i), "+c" (v)
6191 + : : "memory"
6192 + );
6193 + return i;
6194 +}
6195 +
6196 /*
6197 * Other variants with different arithmetic operators:
6198 */
6199 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6200 return a;
6201 }
6202
6203 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6204 +{
6205 + long long a;
6206 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6207 + : "=A" (a)
6208 + : "S" (v)
6209 + : "memory", "ecx"
6210 + );
6211 + return a;
6212 +}
6213 +
6214 static inline long long atomic64_dec_return(atomic64_t *v)
6215 {
6216 long long a;
6217 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6218 }
6219
6220 /**
6221 + * atomic64_add_unchecked - add integer to atomic64 variable
6222 + * @i: integer value to add
6223 + * @v: pointer to type atomic64_unchecked_t
6224 + *
6225 + * Atomically adds @i to @v.
6226 + */
6227 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6228 +{
6229 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6230 + : "+A" (i), "+c" (v)
6231 + : : "memory"
6232 + );
6233 + return i;
6234 +}
6235 +
6236 +/**
6237 * atomic64_sub - subtract the atomic64 variable
6238 * @i: integer value to subtract
6239 * @v: pointer to type atomic64_t
6240 diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_64.h linux-3.0.7/arch/x86/include/asm/atomic64_64.h
6241 --- linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6242 +++ linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6243 @@ -18,7 +18,19 @@
6244 */
6245 static inline long atomic64_read(const atomic64_t *v)
6246 {
6247 - return (*(volatile long *)&(v)->counter);
6248 + return (*(volatile const long *)&(v)->counter);
6249 +}
6250 +
6251 +/**
6252 + * atomic64_read_unchecked - read atomic64 variable
6253 + * @v: pointer of type atomic64_unchecked_t
6254 + *
6255 + * Atomically reads the value of @v.
6256 + * Doesn't imply a read memory barrier.
6257 + */
6258 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6259 +{
6260 + return (*(volatile const long *)&(v)->counter);
6261 }
6262
6263 /**
6264 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6265 }
6266
6267 /**
6268 + * atomic64_set_unchecked - set atomic64 variable
6269 + * @v: pointer to type atomic64_unchecked_t
6270 + * @i: required value
6271 + *
6272 + * Atomically sets the value of @v to @i.
6273 + */
6274 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6275 +{
6276 + v->counter = i;
6277 +}
6278 +
6279 +/**
6280 * atomic64_add - add integer to atomic64 variable
6281 * @i: integer value to add
6282 * @v: pointer to type atomic64_t
6283 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6284 */
6285 static inline void atomic64_add(long i, atomic64_t *v)
6286 {
6287 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6288 +
6289 +#ifdef CONFIG_PAX_REFCOUNT
6290 + "jno 0f\n"
6291 + LOCK_PREFIX "subq %1,%0\n"
6292 + "int $4\n0:\n"
6293 + _ASM_EXTABLE(0b, 0b)
6294 +#endif
6295 +
6296 + : "=m" (v->counter)
6297 + : "er" (i), "m" (v->counter));
6298 +}
6299 +
6300 +/**
6301 + * atomic64_add_unchecked - add integer to atomic64 variable
6302 + * @i: integer value to add
6303 + * @v: pointer to type atomic64_unchecked_t
6304 + *
6305 + * Atomically adds @i to @v.
6306 + */
6307 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6308 +{
6309 asm volatile(LOCK_PREFIX "addq %1,%0"
6310 : "=m" (v->counter)
6311 : "er" (i), "m" (v->counter));
6312 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6313 */
6314 static inline void atomic64_sub(long i, atomic64_t *v)
6315 {
6316 - asm volatile(LOCK_PREFIX "subq %1,%0"
6317 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6318 +
6319 +#ifdef CONFIG_PAX_REFCOUNT
6320 + "jno 0f\n"
6321 + LOCK_PREFIX "addq %1,%0\n"
6322 + "int $4\n0:\n"
6323 + _ASM_EXTABLE(0b, 0b)
6324 +#endif
6325 +
6326 + : "=m" (v->counter)
6327 + : "er" (i), "m" (v->counter));
6328 +}
6329 +
6330 +/**
6331 + * atomic64_sub_unchecked - subtract the atomic64 variable
6332 + * @i: integer value to subtract
6333 + * @v: pointer to type atomic64_unchecked_t
6334 + *
6335 + * Atomically subtracts @i from @v.
6336 + */
6337 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6338 +{
6339 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6340 : "=m" (v->counter)
6341 : "er" (i), "m" (v->counter));
6342 }
6343 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6344 {
6345 unsigned char c;
6346
6347 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6348 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6349 +
6350 +#ifdef CONFIG_PAX_REFCOUNT
6351 + "jno 0f\n"
6352 + LOCK_PREFIX "addq %2,%0\n"
6353 + "int $4\n0:\n"
6354 + _ASM_EXTABLE(0b, 0b)
6355 +#endif
6356 +
6357 + "sete %1\n"
6358 : "=m" (v->counter), "=qm" (c)
6359 : "er" (i), "m" (v->counter) : "memory");
6360 return c;
6361 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6362 */
6363 static inline void atomic64_inc(atomic64_t *v)
6364 {
6365 + asm volatile(LOCK_PREFIX "incq %0\n"
6366 +
6367 +#ifdef CONFIG_PAX_REFCOUNT
6368 + "jno 0f\n"
6369 + LOCK_PREFIX "decq %0\n"
6370 + "int $4\n0:\n"
6371 + _ASM_EXTABLE(0b, 0b)
6372 +#endif
6373 +
6374 + : "=m" (v->counter)
6375 + : "m" (v->counter));
6376 +}
6377 +
6378 +/**
6379 + * atomic64_inc_unchecked - increment atomic64 variable
6380 + * @v: pointer to type atomic64_unchecked_t
6381 + *
6382 + * Atomically increments @v by 1.
6383 + */
6384 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6385 +{
6386 asm volatile(LOCK_PREFIX "incq %0"
6387 : "=m" (v->counter)
6388 : "m" (v->counter));
6389 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6390 */
6391 static inline void atomic64_dec(atomic64_t *v)
6392 {
6393 - asm volatile(LOCK_PREFIX "decq %0"
6394 + asm volatile(LOCK_PREFIX "decq %0\n"
6395 +
6396 +#ifdef CONFIG_PAX_REFCOUNT
6397 + "jno 0f\n"
6398 + LOCK_PREFIX "incq %0\n"
6399 + "int $4\n0:\n"
6400 + _ASM_EXTABLE(0b, 0b)
6401 +#endif
6402 +
6403 + : "=m" (v->counter)
6404 + : "m" (v->counter));
6405 +}
6406 +
6407 +/**
6408 + * atomic64_dec_unchecked - decrement atomic64 variable
6409 + * @v: pointer to type atomic64_unchecked_t
6410 + *
6411 + * Atomically decrements @v by 1.
6412 + */
6413 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6414 +{
6415 + asm volatile(LOCK_PREFIX "decq %0\n"
6416 : "=m" (v->counter)
6417 : "m" (v->counter));
6418 }
6419 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6420 {
6421 unsigned char c;
6422
6423 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6424 + asm volatile(LOCK_PREFIX "decq %0\n"
6425 +
6426 +#ifdef CONFIG_PAX_REFCOUNT
6427 + "jno 0f\n"
6428 + LOCK_PREFIX "incq %0\n"
6429 + "int $4\n0:\n"
6430 + _ASM_EXTABLE(0b, 0b)
6431 +#endif
6432 +
6433 + "sete %1\n"
6434 : "=m" (v->counter), "=qm" (c)
6435 : "m" (v->counter) : "memory");
6436 return c != 0;
6437 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6438 {
6439 unsigned char c;
6440
6441 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6442 + asm volatile(LOCK_PREFIX "incq %0\n"
6443 +
6444 +#ifdef CONFIG_PAX_REFCOUNT
6445 + "jno 0f\n"
6446 + LOCK_PREFIX "decq %0\n"
6447 + "int $4\n0:\n"
6448 + _ASM_EXTABLE(0b, 0b)
6449 +#endif
6450 +
6451 + "sete %1\n"
6452 : "=m" (v->counter), "=qm" (c)
6453 : "m" (v->counter) : "memory");
6454 return c != 0;
6455 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6456 {
6457 unsigned char c;
6458
6459 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6460 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6461 +
6462 +#ifdef CONFIG_PAX_REFCOUNT
6463 + "jno 0f\n"
6464 + LOCK_PREFIX "subq %2,%0\n"
6465 + "int $4\n0:\n"
6466 + _ASM_EXTABLE(0b, 0b)
6467 +#endif
6468 +
6469 + "sets %1\n"
6470 : "=m" (v->counter), "=qm" (c)
6471 : "er" (i), "m" (v->counter) : "memory");
6472 return c;
6473 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6474 static inline long atomic64_add_return(long i, atomic64_t *v)
6475 {
6476 long __i = i;
6477 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6478 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6479 +
6480 +#ifdef CONFIG_PAX_REFCOUNT
6481 + "jno 0f\n"
6482 + "movq %0, %1\n"
6483 + "int $4\n0:\n"
6484 + _ASM_EXTABLE(0b, 0b)
6485 +#endif
6486 +
6487 + : "+r" (i), "+m" (v->counter)
6488 + : : "memory");
6489 + return i + __i;
6490 +}
6491 +
6492 +/**
6493 + * atomic64_add_return_unchecked - add and return
6494 + * @i: integer value to add
6495 + * @v: pointer to type atomic64_unchecked_t
6496 + *
6497 + * Atomically adds @i to @v and returns @i + @v
6498 + */
6499 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6500 +{
6501 + long __i = i;
6502 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6503 : "+r" (i), "+m" (v->counter)
6504 : : "memory");
6505 return i + __i;
6506 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6507 }
6508
6509 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6510 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6511 +{
6512 + return atomic64_add_return_unchecked(1, v);
6513 +}
6514 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6515
6516 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6517 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6518 return cmpxchg(&v->counter, old, new);
6519 }
6520
6521 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6522 +{
6523 + return cmpxchg(&v->counter, old, new);
6524 +}
6525 +
6526 static inline long atomic64_xchg(atomic64_t *v, long new)
6527 {
6528 return xchg(&v->counter, new);
6529 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6530 */
6531 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6532 {
6533 - long c, old;
6534 + long c, old, new;
6535 c = atomic64_read(v);
6536 for (;;) {
6537 - if (unlikely(c == (u)))
6538 + if (unlikely(c == u))
6539 break;
6540 - old = atomic64_cmpxchg((v), c, c + (a));
6541 +
6542 + asm volatile("add %2,%0\n"
6543 +
6544 +#ifdef CONFIG_PAX_REFCOUNT
6545 + "jno 0f\n"
6546 + "sub %2,%0\n"
6547 + "int $4\n0:\n"
6548 + _ASM_EXTABLE(0b, 0b)
6549 +#endif
6550 +
6551 + : "=r" (new)
6552 + : "0" (c), "ir" (a));
6553 +
6554 + old = atomic64_cmpxchg(v, c, new);
6555 if (likely(old == c))
6556 break;
6557 c = old;
6558 }
6559 - return c != (u);
6560 + return c != u;
6561 }
6562
6563 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
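
The pattern repeated throughout this file is: perform the locked operation,
jump over the fixup with "jno" when no signed overflow occurred, otherwise
undo the operation and raise #OF with "int $4"; the _ASM_EXTABLE(0b, 0b)
entry lets the exception handler resume right after the trap. A rough x86-64
userspace illustration of the detect-and-undo half (GCC inline asm, with the
trap and exception table replaced by a flag; like the kernel sequence, it
leaves a brief window in which another CPU could observe the wrapped value):

#include <stdio.h>
#include <limits.h>

static long counter;

static int checked_add(long i)
{
    int overflow = 0;

    asm volatile("lock addq %2, %0\n\t"
                 "jno 1f\n\t"
                 "lock subq %2, %0\n\t"   /* undo the wrapped add */
                 "movl $1, %1\n"
                 "1:"
                 : "+m" (counter), "+r" (overflow)
                 : "er" (i)
                 : "memory", "cc");
    return overflow;
}

int main(void)
{
    int of;

    counter = LONG_MAX;
    of = checked_add(1);
    printf("overflow=%d counter=%ld\n", of, counter);  /* 1, still LONG_MAX */

    counter = 0;
    of = checked_add(1);
    printf("overflow=%d counter=%ld\n", of, counter);  /* 0, 1 */
    return 0;
}
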
6564 diff -urNp linux-3.0.7/arch/x86/include/asm/atomic.h linux-3.0.7/arch/x86/include/asm/atomic.h
6565 --- linux-3.0.7/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6566 +++ linux-3.0.7/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6567 @@ -22,7 +22,18 @@
6568 */
6569 static inline int atomic_read(const atomic_t *v)
6570 {
6571 - return (*(volatile int *)&(v)->counter);
6572 + return (*(volatile const int *)&(v)->counter);
6573 +}
6574 +
6575 +/**
6576 + * atomic_read_unchecked - read atomic variable
6577 + * @v: pointer of type atomic_unchecked_t
6578 + *
6579 + * Atomically reads the value of @v.
6580 + */
6581 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6582 +{
6583 + return (*(volatile const int *)&(v)->counter);
6584 }
6585
6586 /**
6587 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6588 }
6589
6590 /**
6591 + * atomic_set_unchecked - set atomic variable
6592 + * @v: pointer of type atomic_unchecked_t
6593 + * @i: required value
6594 + *
6595 + * Atomically sets the value of @v to @i.
6596 + */
6597 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6598 +{
6599 + v->counter = i;
6600 +}
6601 +
6602 +/**
6603 * atomic_add - add integer to atomic variable
6604 * @i: integer value to add
6605 * @v: pointer of type atomic_t
6606 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6607 */
6608 static inline void atomic_add(int i, atomic_t *v)
6609 {
6610 - asm volatile(LOCK_PREFIX "addl %1,%0"
6611 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6612 +
6613 +#ifdef CONFIG_PAX_REFCOUNT
6614 + "jno 0f\n"
6615 + LOCK_PREFIX "subl %1,%0\n"
6616 + "int $4\n0:\n"
6617 + _ASM_EXTABLE(0b, 0b)
6618 +#endif
6619 +
6620 + : "+m" (v->counter)
6621 + : "ir" (i));
6622 +}
6623 +
6624 +/**
6625 + * atomic_add_unchecked - add integer to atomic variable
6626 + * @i: integer value to add
6627 + * @v: pointer of type atomic_unchecked_t
6628 + *
6629 + * Atomically adds @i to @v.
6630 + */
6631 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6632 +{
6633 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6634 : "+m" (v->counter)
6635 : "ir" (i));
6636 }
6637 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6638 */
6639 static inline void atomic_sub(int i, atomic_t *v)
6640 {
6641 - asm volatile(LOCK_PREFIX "subl %1,%0"
6642 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6643 +
6644 +#ifdef CONFIG_PAX_REFCOUNT
6645 + "jno 0f\n"
6646 + LOCK_PREFIX "addl %1,%0\n"
6647 + "int $4\n0:\n"
6648 + _ASM_EXTABLE(0b, 0b)
6649 +#endif
6650 +
6651 + : "+m" (v->counter)
6652 + : "ir" (i));
6653 +}
6654 +
6655 +/**
6656 + * atomic_sub_unchecked - subtract integer from atomic variable
6657 + * @i: integer value to subtract
6658 + * @v: pointer of type atomic_unchecked_t
6659 + *
6660 + * Atomically subtracts @i from @v.
6661 + */
6662 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6663 +{
6664 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6665 : "+m" (v->counter)
6666 : "ir" (i));
6667 }
6668 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6669 {
6670 unsigned char c;
6671
6672 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6673 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6674 +
6675 +#ifdef CONFIG_PAX_REFCOUNT
6676 + "jno 0f\n"
6677 + LOCK_PREFIX "addl %2,%0\n"
6678 + "int $4\n0:\n"
6679 + _ASM_EXTABLE(0b, 0b)
6680 +#endif
6681 +
6682 + "sete %1\n"
6683 : "+m" (v->counter), "=qm" (c)
6684 : "ir" (i) : "memory");
6685 return c;
6686 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6687 */
6688 static inline void atomic_inc(atomic_t *v)
6689 {
6690 - asm volatile(LOCK_PREFIX "incl %0"
6691 + asm volatile(LOCK_PREFIX "incl %0\n"
6692 +
6693 +#ifdef CONFIG_PAX_REFCOUNT
6694 + "jno 0f\n"
6695 + LOCK_PREFIX "decl %0\n"
6696 + "int $4\n0:\n"
6697 + _ASM_EXTABLE(0b, 0b)
6698 +#endif
6699 +
6700 + : "+m" (v->counter));
6701 +}
6702 +
6703 +/**
6704 + * atomic_inc_unchecked - increment atomic variable
6705 + * @v: pointer of type atomic_unchecked_t
6706 + *
6707 + * Atomically increments @v by 1.
6708 + */
6709 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6710 +{
6711 + asm volatile(LOCK_PREFIX "incl %0\n"
6712 : "+m" (v->counter));
6713 }
6714
6715 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6716 */
6717 static inline void atomic_dec(atomic_t *v)
6718 {
6719 - asm volatile(LOCK_PREFIX "decl %0"
6720 + asm volatile(LOCK_PREFIX "decl %0\n"
6721 +
6722 +#ifdef CONFIG_PAX_REFCOUNT
6723 + "jno 0f\n"
6724 + LOCK_PREFIX "incl %0\n"
6725 + "int $4\n0:\n"
6726 + _ASM_EXTABLE(0b, 0b)
6727 +#endif
6728 +
6729 + : "+m" (v->counter));
6730 +}
6731 +
6732 +/**
6733 + * atomic_dec_unchecked - decrement atomic variable
6734 + * @v: pointer of type atomic_unchecked_t
6735 + *
6736 + * Atomically decrements @v by 1.
6737 + */
6738 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6739 +{
6740 + asm volatile(LOCK_PREFIX "decl %0\n"
6741 : "+m" (v->counter));
6742 }
6743
6744 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6745 {
6746 unsigned char c;
6747
6748 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6749 + asm volatile(LOCK_PREFIX "decl %0\n"
6750 +
6751 +#ifdef CONFIG_PAX_REFCOUNT
6752 + "jno 0f\n"
6753 + LOCK_PREFIX "incl %0\n"
6754 + "int $4\n0:\n"
6755 + _ASM_EXTABLE(0b, 0b)
6756 +#endif
6757 +
6758 + "sete %1\n"
6759 : "+m" (v->counter), "=qm" (c)
6760 : : "memory");
6761 return c != 0;
6762 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6763 {
6764 unsigned char c;
6765
6766 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6767 + asm volatile(LOCK_PREFIX "incl %0\n"
6768 +
6769 +#ifdef CONFIG_PAX_REFCOUNT
6770 + "jno 0f\n"
6771 + LOCK_PREFIX "decl %0\n"
6772 + "int $4\n0:\n"
6773 + _ASM_EXTABLE(0b, 0b)
6774 +#endif
6775 +
6776 + "sete %1\n"
6777 + : "+m" (v->counter), "=qm" (c)
6778 + : : "memory");
6779 + return c != 0;
6780 +}
6781 +
6782 +/**
6783 + * atomic_inc_and_test_unchecked - increment and test
6784 + * @v: pointer of type atomic_unchecked_t
6785 + *
6786 + * Atomically increments @v by 1
6787 + * and returns true if the result is zero, or false for all
6788 + * other cases.
6789 + */
6790 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6791 +{
6792 + unsigned char c;
6793 +
6794 + asm volatile(LOCK_PREFIX "incl %0\n"
6795 + "sete %1\n"
6796 : "+m" (v->counter), "=qm" (c)
6797 : : "memory");
6798 return c != 0;
6799 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6800 {
6801 unsigned char c;
6802
6803 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6804 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6805 +
6806 +#ifdef CONFIG_PAX_REFCOUNT
6807 + "jno 0f\n"
6808 + LOCK_PREFIX "subl %2,%0\n"
6809 + "int $4\n0:\n"
6810 + _ASM_EXTABLE(0b, 0b)
6811 +#endif
6812 +
6813 + "sets %1\n"
6814 : "+m" (v->counter), "=qm" (c)
6815 : "ir" (i) : "memory");
6816 return c;
6817 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6818 #endif
6819 /* Modern 486+ processor */
6820 __i = i;
6821 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6822 +
6823 +#ifdef CONFIG_PAX_REFCOUNT
6824 + "jno 0f\n"
6825 + "movl %0, %1\n"
6826 + "int $4\n0:\n"
6827 + _ASM_EXTABLE(0b, 0b)
6828 +#endif
6829 +
6830 + : "+r" (i), "+m" (v->counter)
6831 + : : "memory");
6832 + return i + __i;
6833 +
6834 +#ifdef CONFIG_M386
6835 +no_xadd: /* Legacy 386 processor */
6836 + local_irq_save(flags);
6837 + __i = atomic_read(v);
6838 + atomic_set(v, i + __i);
6839 + local_irq_restore(flags);
6840 + return i + __i;
6841 +#endif
6842 +}
6843 +
6844 +/**
6845 + * atomic_add_return_unchecked - add integer and return
6846 + * @v: pointer of type atomic_unchecked_t
6847 + * @i: integer value to add
6848 + *
6849 + * Atomically adds @i to @v and returns @i + @v
6850 + */
6851 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6852 +{
6853 + int __i;
6854 +#ifdef CONFIG_M386
6855 + unsigned long flags;
6856 + if (unlikely(boot_cpu_data.x86 <= 3))
6857 + goto no_xadd;
6858 +#endif
6859 + /* Modern 486+ processor */
6860 + __i = i;
6861 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6862 : "+r" (i), "+m" (v->counter)
6863 : : "memory");
6864 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6865 }
6866
6867 #define atomic_inc_return(v) (atomic_add_return(1, v))
6868 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6869 +{
6870 + return atomic_add_return_unchecked(1, v);
6871 +}
6872 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6873
6874 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6875 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6876 return cmpxchg(&v->counter, old, new);
6877 }
6878
6879 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6880 +{
6881 + return cmpxchg(&v->counter, old, new);
6882 +}
6883 +
6884 static inline int atomic_xchg(atomic_t *v, int new)
6885 {
6886 return xchg(&v->counter, new);
6887 }
6888
6889 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6890 +{
6891 + return xchg(&v->counter, new);
6892 +}
6893 +
6894 /**
6895 * atomic_add_unless - add unless the number is already a given value
6896 * @v: pointer of type atomic_t
6897 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6898 */
6899 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6900 {
6901 - int c, old;
6902 + int c, old, new;
6903 c = atomic_read(v);
6904 for (;;) {
6905 - if (unlikely(c == (u)))
6906 + if (unlikely(c == u))
6907 break;
6908 - old = atomic_cmpxchg((v), c, c + (a));
6909 +
6910 + asm volatile("addl %2,%0\n"
6911 +
6912 +#ifdef CONFIG_PAX_REFCOUNT
6913 + "jno 0f\n"
6914 + "subl %2,%0\n"
6915 + "int $4\n0:\n"
6916 + _ASM_EXTABLE(0b, 0b)
6917 +#endif
6918 +
6919 + : "=r" (new)
6920 + : "0" (c), "ir" (a));
6921 +
6922 + old = atomic_cmpxchg(v, c, new);
6923 if (likely(old == c))
6924 break;
6925 c = old;
6926 }
6927 - return c != (u);
6928 + return c != u;
6929 }
6930
6931 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6932
6933 +/**
6934 + * atomic_inc_not_zero_hint - increment if not null
6935 + * @v: pointer of type atomic_t
6936 + * @hint: probable value of the atomic before the increment
6937 + *
6938 + * This version of atomic_inc_not_zero() gives a hint of probable
6939 + * value of the atomic. This helps the processor avoid reading the memory
6940 + * before doing the atomic read/modify/write cycle, lowering the
6941 + * number of bus transactions on some arches.
6942 + *
6943 + * Returns: 0 if increment was not done, 1 otherwise.
6944 + */
6945 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6946 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6947 +{
6948 + int val, c = hint, new;
6949 +
6950 + /* sanity test, should be removed by compiler if hint is a constant */
6951 + if (!hint)
6952 + return atomic_inc_not_zero(v);
6953 +
6954 + do {
6955 + asm volatile("incl %0\n"
6956 +
6957 +#ifdef CONFIG_PAX_REFCOUNT
6958 + "jno 0f\n"
6959 + "decl %0\n"
6960 + "int $4\n0:\n"
6961 + _ASM_EXTABLE(0b, 0b)
6962 +#endif
6963 +
6964 + : "=r" (new)
6965 + : "0" (c));
6966 +
6967 + val = atomic_cmpxchg(v, c, new);
6968 + if (val == c)
6969 + return 1;
6970 + c = val;
6971 + } while (c);
6972 +
6973 + return 0;
6974 +}
6975 +
6976 /*
6977 * atomic_dec_if_positive - decrement by 1 if old value positive
6978 * @v: pointer of type atomic_t
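
Both atomic_add_unless() and the new atomic_inc_not_zero_hint() are
compare-and-swap loops; the patch only changes how the prospective new value
is computed (through an overflow-checked add). A sketch of the loop shape
itself, using GCC's __atomic builtins and leaving the overflow check out:

#include <stdio.h>

/* returns 1 if the add was performed, 0 if *v already equalled u */
static int add_unless(int *v, int a, int u)
{
    int c = __atomic_load_n(v, __ATOMIC_RELAXED);

    for (;;) {
        if (c == u)
            break;
        /* on failure, c is reloaded with the current value of *v,
         * which is what the "c = old" step does in the kernel loop */
        if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
            break;
    }
    return c != u;
}

int main(void)
{
    int v = 0;
    int added;

    added = add_unless(&v, 1, 0);           /* v == u: nothing happens */
    printf("added=%d v=%d\n", added, v);    /* added=0 v=0 */

    added = add_unless(&v, 1, 5);           /* v != u: v becomes 1 */
    printf("added=%d v=%d\n", added, v);    /* added=1 v=1 */
    return 0;
}
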
6979 diff -urNp linux-3.0.7/arch/x86/include/asm/bitops.h linux-3.0.7/arch/x86/include/asm/bitops.h
6980 --- linux-3.0.7/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6981 +++ linux-3.0.7/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6982 @@ -38,7 +38,7 @@
6983 * a mask operation on a byte.
6984 */
6985 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6986 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6987 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6988 #define CONST_MASK(nr) (1 << ((nr) & 7))
6989
6990 /**
6991 diff -urNp linux-3.0.7/arch/x86/include/asm/boot.h linux-3.0.7/arch/x86/include/asm/boot.h
6992 --- linux-3.0.7/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6993 +++ linux-3.0.7/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6994 @@ -11,10 +11,15 @@
6995 #include <asm/pgtable_types.h>
6996
6997 /* Physical address where kernel should be loaded. */
6998 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6999 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7000 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7001 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7002
7003 +#ifndef __ASSEMBLY__
7004 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7005 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7006 +#endif
7007 +
7008 /* Minimum kernel alignment, as a power of two */
7009 #ifdef CONFIG_X86_64
7010 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
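
The boot.h change turns LOAD_PHYSICAL_ADDR from a preprocessor constant into
the address of a linker-provided symbol, so C code picks the value up through
a relocation instead of baking the arithmetic in at compile time. A generic
illustration of that idiom (my_base is a made-up symbol; build with
gcc -no-pie demo.c -Wl,--defsym,my_base=0x1000000):

#include <stdio.h>

extern unsigned char my_base[];            /* defined by the linker */
#define MY_BASE ((unsigned long)my_base)

int main(void)
{
    printf("%#lx\n", MY_BASE);             /* prints 0x1000000 */
    return 0;
}
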
7011 diff -urNp linux-3.0.7/arch/x86/include/asm/cacheflush.h linux-3.0.7/arch/x86/include/asm/cacheflush.h
7012 --- linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7013 +++ linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7014 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7015 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7016
7017 if (pg_flags == _PGMT_DEFAULT)
7018 - return -1;
7019 + return ~0UL;
7020 else if (pg_flags == _PGMT_WC)
7021 return _PAGE_CACHE_WC;
7022 else if (pg_flags == _PGMT_UC_MINUS)
7023 diff -urNp linux-3.0.7/arch/x86/include/asm/cache.h linux-3.0.7/arch/x86/include/asm/cache.h
7024 --- linux-3.0.7/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7025 +++ linux-3.0.7/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7026 @@ -5,12 +5,13 @@
7027
7028 /* L1 cache line size */
7029 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7030 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7031 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7032
7033 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7034 +#define __read_only __attribute__((__section__(".data..read_only")))
7035
7036 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7037 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7038 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7039
7040 #ifdef CONFIG_X86_VSMP
7041 #ifdef CONFIG_SMP
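
The new __read_only marker only places an object into the ".data..read_only"
section; the protection itself comes from the kernel linker script and the
code that later write-protects that region. The underlying section-placement
idiom, shown with a made-up section name:

#include <stdio.h>

/* the linker script decides where ".data.demo_ro" ends up and how the
 * pages backing it are ultimately protected */
static int boot_mode __attribute__((__section__(".data.demo_ro"))) = 1;

int main(void)
{
    printf("boot_mode=%d\n", boot_mode);
    return 0;
}
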
7042 diff -urNp linux-3.0.7/arch/x86/include/asm/checksum_32.h linux-3.0.7/arch/x86/include/asm/checksum_32.h
7043 --- linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7044 +++ linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7045 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7046 int len, __wsum sum,
7047 int *src_err_ptr, int *dst_err_ptr);
7048
7049 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7050 + int len, __wsum sum,
7051 + int *src_err_ptr, int *dst_err_ptr);
7052 +
7053 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7054 + int len, __wsum sum,
7055 + int *src_err_ptr, int *dst_err_ptr);
7056 +
7057 /*
7058 * Note: when you get a NULL pointer exception here this means someone
7059 * passed in an incorrect kernel address to one of these functions.
7060 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7061 int *err_ptr)
7062 {
7063 might_sleep();
7064 - return csum_partial_copy_generic((__force void *)src, dst,
7065 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7066 len, sum, err_ptr, NULL);
7067 }
7068
7069 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7070 {
7071 might_sleep();
7072 if (access_ok(VERIFY_WRITE, dst, len))
7073 - return csum_partial_copy_generic(src, (__force void *)dst,
7074 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7075 len, sum, NULL, err_ptr);
7076
7077 if (len)
7078 diff -urNp linux-3.0.7/arch/x86/include/asm/cpufeature.h linux-3.0.7/arch/x86/include/asm/cpufeature.h
7079 --- linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7080 +++ linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7081 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7082 ".section .discard,\"aw\",@progbits\n"
7083 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7084 ".previous\n"
7085 - ".section .altinstr_replacement,\"ax\"\n"
7086 + ".section .altinstr_replacement,\"a\"\n"
7087 "3: movb $1,%0\n"
7088 "4:\n"
7089 ".previous\n"
7090 diff -urNp linux-3.0.7/arch/x86/include/asm/desc_defs.h linux-3.0.7/arch/x86/include/asm/desc_defs.h
7091 --- linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7092 +++ linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7093 @@ -31,6 +31,12 @@ struct desc_struct {
7094 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7095 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7096 };
7097 + struct {
7098 + u16 offset_low;
7099 + u16 seg;
7100 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7101 + unsigned offset_high: 16;
7102 + } gate;
7103 };
7104 } __attribute__((packed));
7105
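
The added "gate" member gives a field-by-field view of the same 8-byte
descriptor that the old pack_gate() code assembled by hand as two 32-bit
words. A userspace check of that equivalence for a 32-bit interrupt gate
(GCC bitfield layout on little-endian x86 assumed; the base and selector
values are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct gate_bits {
    uint16_t offset_low;
    uint16_t seg;
    unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
    uint32_t base = 0xc0123456, seg = 0x60, type = 0xE, dpl = 0;

    /* old encoding: two hand-built words */
    uint32_t a = (seg << 16) | (base & 0xffff);
    uint32_t b = (base & 0xffff0000) |
                 (((0x80 | type | (dpl << 5)) & 0xff) << 8);

    /* new encoding: the bitfield view */
    struct gate_bits g = {
        .offset_low = base, .seg = seg, .reserved = 0,
        .type = type, .s = 0, .dpl = dpl, .p = 1,
        .offset_high = base >> 16,
    };
    uint32_t w[2];

    memcpy(w, &g, sizeof(w));
    printf("old: %08x %08x\nnew: %08x %08x\n", a, b, w[0], w[1]);
    return 0;
}
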
7106 diff -urNp linux-3.0.7/arch/x86/include/asm/desc.h linux-3.0.7/arch/x86/include/asm/desc.h
7107 --- linux-3.0.7/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7108 +++ linux-3.0.7/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7109 @@ -4,6 +4,7 @@
7110 #include <asm/desc_defs.h>
7111 #include <asm/ldt.h>
7112 #include <asm/mmu.h>
7113 +#include <asm/pgtable.h>
7114
7115 #include <linux/smp.h>
7116
7117 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7118
7119 desc->type = (info->read_exec_only ^ 1) << 1;
7120 desc->type |= info->contents << 2;
7121 + desc->type |= info->seg_not_present ^ 1;
7122
7123 desc->s = 1;
7124 desc->dpl = 0x3;
7125 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7126 }
7127
7128 extern struct desc_ptr idt_descr;
7129 -extern gate_desc idt_table[];
7130 -
7131 -struct gdt_page {
7132 - struct desc_struct gdt[GDT_ENTRIES];
7133 -} __attribute__((aligned(PAGE_SIZE)));
7134 -
7135 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7136 +extern gate_desc idt_table[256];
7137
7138 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7139 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7140 {
7141 - return per_cpu(gdt_page, cpu).gdt;
7142 + return cpu_gdt_table[cpu];
7143 }
7144
7145 #ifdef CONFIG_X86_64
7146 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7147 unsigned long base, unsigned dpl, unsigned flags,
7148 unsigned short seg)
7149 {
7150 - gate->a = (seg << 16) | (base & 0xffff);
7151 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7152 + gate->gate.offset_low = base;
7153 + gate->gate.seg = seg;
7154 + gate->gate.reserved = 0;
7155 + gate->gate.type = type;
7156 + gate->gate.s = 0;
7157 + gate->gate.dpl = dpl;
7158 + gate->gate.p = 1;
7159 + gate->gate.offset_high = base >> 16;
7160 }
7161
7162 #endif
7163 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7164
7165 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7166 {
7167 + pax_open_kernel();
7168 memcpy(&idt[entry], gate, sizeof(*gate));
7169 + pax_close_kernel();
7170 }
7171
7172 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7173 {
7174 + pax_open_kernel();
7175 memcpy(&ldt[entry], desc, 8);
7176 + pax_close_kernel();
7177 }
7178
7179 static inline void
7180 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7181 default: size = sizeof(*gdt); break;
7182 }
7183
7184 + pax_open_kernel();
7185 memcpy(&gdt[entry], desc, size);
7186 + pax_close_kernel();
7187 }
7188
7189 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7190 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7191
7192 static inline void native_load_tr_desc(void)
7193 {
7194 + pax_open_kernel();
7195 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7196 + pax_close_kernel();
7197 }
7198
7199 static inline void native_load_gdt(const struct desc_ptr *dtr)
7200 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7201 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7202 unsigned int i;
7203
7204 + pax_open_kernel();
7205 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7206 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7207 + pax_close_kernel();
7208 }
7209
7210 #define _LDT_empty(info) \
7211 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7212 desc->limit = (limit >> 16) & 0xf;
7213 }
7214
7215 -static inline void _set_gate(int gate, unsigned type, void *addr,
7216 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7217 unsigned dpl, unsigned ist, unsigned seg)
7218 {
7219 gate_desc s;
7220 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7221 * Pentium F0 0F bugfix can have resulted in the mapped
7222 * IDT being write-protected.
7223 */
7224 -static inline void set_intr_gate(unsigned int n, void *addr)
7225 +static inline void set_intr_gate(unsigned int n, const void *addr)
7226 {
7227 BUG_ON((unsigned)n > 0xFF);
7228 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7229 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7230 /*
7231 * This routine sets up an interrupt gate at directory privilege level 3.
7232 */
7233 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7234 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7235 {
7236 BUG_ON((unsigned)n > 0xFF);
7237 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7238 }
7239
7240 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7241 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7242 {
7243 BUG_ON((unsigned)n > 0xFF);
7244 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7245 }
7246
7247 -static inline void set_trap_gate(unsigned int n, void *addr)
7248 +static inline void set_trap_gate(unsigned int n, const void *addr)
7249 {
7250 BUG_ON((unsigned)n > 0xFF);
7251 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7252 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7253 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7257 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7258 }
7259
7260 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7261 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7262 {
7263 BUG_ON((unsigned)n > 0xFF);
7264 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7265 }
7266
7267 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7268 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7269 {
7270 BUG_ON((unsigned)n > 0xFF);
7271 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7272 }
7273
7274 +#ifdef CONFIG_X86_32
7275 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7276 +{
7277 + struct desc_struct d;
7278 +
7279 + if (likely(limit))
7280 + limit = (limit - 1UL) >> PAGE_SHIFT;
7281 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7282 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7283 +}
7284 +#endif
7285 +
7286 #endif /* _ASM_X86_DESC_H */
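
All of the pax_open_kernel()/pax_close_kernel() pairs added in this header
follow one pattern: under KERNEXEC the descriptor tables live in read-only
memory, and writes to them are only possible inside a short, explicitly
opened window. A userspace analogue using mprotect() in place of the
kernel's write-protection toggling (error handling omitted for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static unsigned char *table;

static void open_window(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
    table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memset(table, 0, 4096);
    close_window();                 /* table is now read-only, like the IDT */

    open_window();                  /* pax_open_kernel() */
    table[0] = 0x8e;                /* a write_idt_entry()-style update */
    close_window();                 /* pax_close_kernel() */

    printf("%#x\n", table[0]);
    return 0;
}
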
7287 diff -urNp linux-3.0.7/arch/x86/include/asm/e820.h linux-3.0.7/arch/x86/include/asm/e820.h
7288 --- linux-3.0.7/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7289 +++ linux-3.0.7/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7290 @@ -69,7 +69,7 @@ struct e820map {
7291 #define ISA_START_ADDRESS 0xa0000
7292 #define ISA_END_ADDRESS 0x100000
7293
7294 -#define BIOS_BEGIN 0x000a0000
7295 +#define BIOS_BEGIN 0x000c0000
7296 #define BIOS_END 0x00100000
7297
7298 #define BIOS_ROM_BASE 0xffe00000
7299 diff -urNp linux-3.0.7/arch/x86/include/asm/elf.h linux-3.0.7/arch/x86/include/asm/elf.h
7300 --- linux-3.0.7/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7301 +++ linux-3.0.7/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7302 @@ -237,7 +237,25 @@ extern int force_personality32;
7303 the loader. We need to make sure that it is out of the way of the program
7304 that it will "exec", and that there is sufficient room for the brk. */
7305
7306 +#ifdef CONFIG_PAX_SEGMEXEC
7307 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7308 +#else
7309 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7310 +#endif
7311 +
7312 +#ifdef CONFIG_PAX_ASLR
7313 +#ifdef CONFIG_X86_32
7314 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7315 +
7316 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7317 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7318 +#else
7319 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7320 +
7321 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7322 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7323 +#endif
7324 +#endif
7325
7326 /* This yields a mask that user programs can use to figure out what
7327 instruction set this CPU supports. This could be done in user space,
7328 @@ -290,9 +308,7 @@ do { \
7329
7330 #define ARCH_DLINFO \
7331 do { \
7332 - if (vdso_enabled) \
7333 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7334 - (unsigned long)current->mm->context.vdso); \
7335 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7336 } while (0)
7337
7338 #define AT_SYSINFO 32
7339 @@ -303,7 +319,7 @@ do { \
7340
7341 #endif /* !CONFIG_X86_32 */
7342
7343 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7344 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7345
7346 #define VDSO_ENTRY \
7347 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7348 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7349 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7350 #define compat_arch_setup_additional_pages syscall32_setup_pages
7351
7352 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7353 -#define arch_randomize_brk arch_randomize_brk
7354 -
7355 #endif /* _ASM_X86_ELF_H */
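
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values are counts of random
page-offset bits folded into the mmap and stack bases (the 64-bit native case
derives its count from TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3). A quick
back-of-the-envelope calculation for the i386 values visible above:

#include <stdio.h>

int main(void)
{
    unsigned int page_shift = 12;               /* 4 KiB pages */
    unsigned int delta_bits[] = { 15, 16 };     /* i386 with/without SEGMEXEC */

    for (int i = 0; i < 2; i++) {
        unsigned long long span = (1ULL << delta_bits[i]) << page_shift;
        printf("%u bits of page entropy -> base varies over %llu MiB\n",
               delta_bits[i], span >> 20);
    }
    return 0;
}
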
7356 diff -urNp linux-3.0.7/arch/x86/include/asm/emergency-restart.h linux-3.0.7/arch/x86/include/asm/emergency-restart.h
7357 --- linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7358 +++ linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7359 @@ -15,6 +15,6 @@ enum reboot_type {
7360
7361 extern enum reboot_type reboot_type;
7362
7363 -extern void machine_emergency_restart(void);
7364 +extern void machine_emergency_restart(void) __noreturn;
7365
7366 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7367 diff -urNp linux-3.0.7/arch/x86/include/asm/futex.h linux-3.0.7/arch/x86/include/asm/futex.h
7368 --- linux-3.0.7/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7369 +++ linux-3.0.7/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7370 @@ -12,16 +12,18 @@
7371 #include <asm/system.h>
7372
7373 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7374 + typecheck(u32 __user *, uaddr); \
7375 asm volatile("1:\t" insn "\n" \
7376 "2:\t.section .fixup,\"ax\"\n" \
7377 "3:\tmov\t%3, %1\n" \
7378 "\tjmp\t2b\n" \
7379 "\t.previous\n" \
7380 _ASM_EXTABLE(1b, 3b) \
7381 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7382 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7383 : "i" (-EFAULT), "0" (oparg), "1" (0))
7384
7385 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7386 + typecheck(u32 __user *, uaddr); \
7387 asm volatile("1:\tmovl %2, %0\n" \
7388 "\tmovl\t%0, %3\n" \
7389 "\t" insn "\n" \
7390 @@ -34,7 +36,7 @@
7391 _ASM_EXTABLE(1b, 4b) \
7392 _ASM_EXTABLE(2b, 4b) \
7393 : "=&a" (oldval), "=&r" (ret), \
7394 - "+m" (*uaddr), "=&r" (tem) \
7395 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7396 : "r" (oparg), "i" (-EFAULT), "1" (0))
7397
7398 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7399 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7400
7401 switch (op) {
7402 case FUTEX_OP_SET:
7403 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7404 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7405 break;
7406 case FUTEX_OP_ADD:
7407 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7408 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7409 uaddr, oparg);
7410 break;
7411 case FUTEX_OP_OR:
7412 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7413 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7414 return -EFAULT;
7415
7416 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7417 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7418 "2:\t.section .fixup, \"ax\"\n"
7419 "3:\tmov %3, %0\n"
7420 "\tjmp 2b\n"
7421 "\t.previous\n"
7422 _ASM_EXTABLE(1b, 3b)
7423 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7424 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7425 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7426 : "memory"
7427 );
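
The typecheck(u32 __user *, uaddr) lines added to both macros are compile-time
assertions only; they expand to dead code whose sole purpose is to force a
pointer-type comparison. The kernel macro (include/linux/typecheck.h) is
essentially the following, shown here with a userspace use; it relies on GNU C
extensions, and a mismatched type draws a "comparison of distinct pointer
types" warning:

#define typecheck(type, x) \
({  type __dummy; \
    typeof(x) __dummy2; \
    (void)(&__dummy == &__dummy2); \
    1; \
})

int main(void)
{
    unsigned int *p = 0;

    typecheck(unsigned int *, p);       /* fine, compiles silently */
    /* typecheck(unsigned long *, p);      would trigger the warning */
    return 0;
}
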
7428 diff -urNp linux-3.0.7/arch/x86/include/asm/hw_irq.h linux-3.0.7/arch/x86/include/asm/hw_irq.h
7429 --- linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7430 +++ linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7431 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7432 extern void enable_IO_APIC(void);
7433
7434 /* Statistics */
7435 -extern atomic_t irq_err_count;
7436 -extern atomic_t irq_mis_count;
7437 +extern atomic_unchecked_t irq_err_count;
7438 +extern atomic_unchecked_t irq_mis_count;
7439
7440 /* EISA */
7441 extern void eisa_set_level_irq(unsigned int irq);
7442 diff -urNp linux-3.0.7/arch/x86/include/asm/i387.h linux-3.0.7/arch/x86/include/asm/i387.h
7443 --- linux-3.0.7/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7444 +++ linux-3.0.7/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7445 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7446 {
7447 int err;
7448
7449 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7450 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7451 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7452 +#endif
7453 +
7454 /* See comment in fxsave() below. */
7455 #ifdef CONFIG_AS_FXSAVEQ
7456 asm volatile("1: fxrstorq %[fx]\n\t"
7457 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7458 {
7459 int err;
7460
7461 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7462 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7463 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7464 +#endif
7465 +
7466 /*
7467 * Clear the bytes not touched by the fxsave and reserved
7468 * for the SW usage.
7469 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7470 #endif /* CONFIG_X86_64 */
7471
7472 /* We need a safe address that is cheap to find and that is already
7473 - in L1 during context switch. The best choices are unfortunately
7474 - different for UP and SMP */
7475 -#ifdef CONFIG_SMP
7476 -#define safe_address (__per_cpu_offset[0])
7477 -#else
7478 -#define safe_address (kstat_cpu(0).cpustat.user)
7479 -#endif
7480 + in L1 during context switch. */
7481 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7482
7483 /*
7484 * These must be called with preempt disabled
7485 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7486 struct thread_info *me = current_thread_info();
7487 preempt_disable();
7488 if (me->status & TS_USEDFPU)
7489 - __save_init_fpu(me->task);
7490 + __save_init_fpu(current);
7491 else
7492 clts();
7493 }
7494 diff -urNp linux-3.0.7/arch/x86/include/asm/io.h linux-3.0.7/arch/x86/include/asm/io.h
7495 --- linux-3.0.7/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7496 +++ linux-3.0.7/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7497 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7498
7499 #include <linux/vmalloc.h>
7500
7501 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7502 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7503 +{
7504 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7505 +}
7506 +
7507 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7508 +{
7509 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510 +}
7511 +
7512 /*
7513 * Convert a virtual cached pointer to an uncached pointer
7514 */
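
The two helpers added to io.h are pure bounds checks against the CPU's
reported physical address width (boot_cpu_data.x86_phys_bits), rejecting
/dev/mem-style accesses that point past addressable physical memory. The same
arithmetic in a standalone sketch, with a hypothetical 36-bit part:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/* same shape as the check added above; phys_bits stands in for
 * boot_cpu_data.x86_phys_bits */
static int range_ok(unsigned long long addr, size_t count,
                    unsigned int phys_bits)
{
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
           (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    /* 36 physical address bits => 64 GiB of addressable memory */
    printf("%d\n", range_ok(0x1000, 4096, 36));        /* 1: well inside  */
    printf("%d\n", range_ok(1ULL << 36, 4096, 36));    /* 0: past the top */
    return 0;
}
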
7515 diff -urNp linux-3.0.7/arch/x86/include/asm/irqflags.h linux-3.0.7/arch/x86/include/asm/irqflags.h
7516 --- linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7517 +++ linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7518 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7519 sti; \
7520 sysexit
7521
7522 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7523 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7524 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7525 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7526 +
7527 #else
7528 #define INTERRUPT_RETURN iret
7529 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7530 diff -urNp linux-3.0.7/arch/x86/include/asm/kprobes.h linux-3.0.7/arch/x86/include/asm/kprobes.h
7531 --- linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7532 +++ linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7533 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7534 #define RELATIVEJUMP_SIZE 5
7535 #define RELATIVECALL_OPCODE 0xe8
7536 #define RELATIVE_ADDR_SIZE 4
7537 -#define MAX_STACK_SIZE 64
7538 -#define MIN_STACK_SIZE(ADDR) \
7539 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7540 - THREAD_SIZE - (unsigned long)(ADDR))) \
7541 - ? (MAX_STACK_SIZE) \
7542 - : (((unsigned long)current_thread_info()) + \
7543 - THREAD_SIZE - (unsigned long)(ADDR)))
7544 +#define MAX_STACK_SIZE 64UL
7545 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7546
7547 #define flush_insn_slot(p) do { } while (0)
7548
7549 diff -urNp linux-3.0.7/arch/x86/include/asm/kvm_host.h linux-3.0.7/arch/x86/include/asm/kvm_host.h
7550 --- linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7551 +++ linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7552 @@ -441,7 +441,7 @@ struct kvm_arch {
7553 unsigned int n_used_mmu_pages;
7554 unsigned int n_requested_mmu_pages;
7555 unsigned int n_max_mmu_pages;
7556 - atomic_t invlpg_counter;
7557 + atomic_unchecked_t invlpg_counter;
7558 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7559 /*
7560 * Hash table of struct kvm_mmu_page.
7561 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7562 enum x86_intercept_stage stage);
7563
7564 const struct trace_print_flags *exit_reasons_str;
7565 -};
7566 +} __do_const;
7567
7568 struct kvm_arch_async_pf {
7569 u32 token;
7570 diff -urNp linux-3.0.7/arch/x86/include/asm/local.h linux-3.0.7/arch/x86/include/asm/local.h
7571 --- linux-3.0.7/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7572 +++ linux-3.0.7/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7573 @@ -18,26 +18,58 @@ typedef struct {
7574
7575 static inline void local_inc(local_t *l)
7576 {
7577 - asm volatile(_ASM_INC "%0"
7578 + asm volatile(_ASM_INC "%0\n"
7579 +
7580 +#ifdef CONFIG_PAX_REFCOUNT
7581 + "jno 0f\n"
7582 + _ASM_DEC "%0\n"
7583 + "int $4\n0:\n"
7584 + _ASM_EXTABLE(0b, 0b)
7585 +#endif
7586 +
7587 : "+m" (l->a.counter));
7588 }
7589
7590 static inline void local_dec(local_t *l)
7591 {
7592 - asm volatile(_ASM_DEC "%0"
7593 + asm volatile(_ASM_DEC "%0\n"
7594 +
7595 +#ifdef CONFIG_PAX_REFCOUNT
7596 + "jno 0f\n"
7597 + _ASM_INC "%0\n"
7598 + "int $4\n0:\n"
7599 + _ASM_EXTABLE(0b, 0b)
7600 +#endif
7601 +
7602 : "+m" (l->a.counter));
7603 }
7604
7605 static inline void local_add(long i, local_t *l)
7606 {
7607 - asm volatile(_ASM_ADD "%1,%0"
7608 + asm volatile(_ASM_ADD "%1,%0\n"
7609 +
7610 +#ifdef CONFIG_PAX_REFCOUNT
7611 + "jno 0f\n"
7612 + _ASM_SUB "%1,%0\n"
7613 + "int $4\n0:\n"
7614 + _ASM_EXTABLE(0b, 0b)
7615 +#endif
7616 +
7617 : "+m" (l->a.counter)
7618 : "ir" (i));
7619 }
7620
7621 static inline void local_sub(long i, local_t *l)
7622 {
7623 - asm volatile(_ASM_SUB "%1,%0"
7624 + asm volatile(_ASM_SUB "%1,%0\n"
7625 +
7626 +#ifdef CONFIG_PAX_REFCOUNT
7627 + "jno 0f\n"
7628 + _ASM_ADD "%1,%0\n"
7629 + "int $4\n0:\n"
7630 + _ASM_EXTABLE(0b, 0b)
7631 +#endif
7632 +
7633 : "+m" (l->a.counter)
7634 : "ir" (i));
7635 }
7636 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7637 {
7638 unsigned char c;
7639
7640 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7641 + asm volatile(_ASM_SUB "%2,%0\n"
7642 +
7643 +#ifdef CONFIG_PAX_REFCOUNT
7644 + "jno 0f\n"
7645 + _ASM_ADD "%2,%0\n"
7646 + "int $4\n0:\n"
7647 + _ASM_EXTABLE(0b, 0b)
7648 +#endif
7649 +
7650 + "sete %1\n"
7651 : "+m" (l->a.counter), "=qm" (c)
7652 : "ir" (i) : "memory");
7653 return c;
7654 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7655 {
7656 unsigned char c;
7657
7658 - asm volatile(_ASM_DEC "%0; sete %1"
7659 + asm volatile(_ASM_DEC "%0\n"
7660 +
7661 +#ifdef CONFIG_PAX_REFCOUNT
7662 + "jno 0f\n"
7663 + _ASM_INC "%0\n"
7664 + "int $4\n0:\n"
7665 + _ASM_EXTABLE(0b, 0b)
7666 +#endif
7667 +
7668 + "sete %1\n"
7669 : "+m" (l->a.counter), "=qm" (c)
7670 : : "memory");
7671 return c != 0;
7672 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7673 {
7674 unsigned char c;
7675
7676 - asm volatile(_ASM_INC "%0; sete %1"
7677 + asm volatile(_ASM_INC "%0\n"
7678 +
7679 +#ifdef CONFIG_PAX_REFCOUNT
7680 + "jno 0f\n"
7681 + _ASM_DEC "%0\n"
7682 + "int $4\n0:\n"
7683 + _ASM_EXTABLE(0b, 0b)
7684 +#endif
7685 +
7686 + "sete %1\n"
7687 : "+m" (l->a.counter), "=qm" (c)
7688 : : "memory");
7689 return c != 0;
7690 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7691 {
7692 unsigned char c;
7693
7694 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7695 + asm volatile(_ASM_ADD "%2,%0\n"
7696 +
7697 +#ifdef CONFIG_PAX_REFCOUNT
7698 + "jno 0f\n"
7699 + _ASM_SUB "%2,%0\n"
7700 + "int $4\n0:\n"
7701 + _ASM_EXTABLE(0b, 0b)
7702 +#endif
7703 +
7704 + "sets %1\n"
7705 : "+m" (l->a.counter), "=qm" (c)
7706 : "ir" (i) : "memory");
7707 return c;
7708 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7709 #endif
7710 /* Modern 486+ processor */
7711 __i = i;
7712 - asm volatile(_ASM_XADD "%0, %1;"
7713 + asm volatile(_ASM_XADD "%0, %1\n"
7714 +
7715 +#ifdef CONFIG_PAX_REFCOUNT
7716 + "jno 0f\n"
7717 + _ASM_MOV "%0,%1\n"
7718 + "int $4\n0:\n"
7719 + _ASM_EXTABLE(0b, 0b)
7720 +#endif
7721 +
7722 : "+r" (i), "+m" (l->a.counter)
7723 : : "memory");
7724 return i + __i;
7725 diff -urNp linux-3.0.7/arch/x86/include/asm/mman.h linux-3.0.7/arch/x86/include/asm/mman.h
7726 --- linux-3.0.7/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7727 +++ linux-3.0.7/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7728 @@ -5,4 +5,14 @@
7729
7730 #include <asm-generic/mman.h>
7731
7732 +#ifdef __KERNEL__
7733 +#ifndef __ASSEMBLY__
7734 +#ifdef CONFIG_X86_32
7735 +#define arch_mmap_check i386_mmap_check
7736 +int i386_mmap_check(unsigned long addr, unsigned long len,
7737 + unsigned long flags);
7738 +#endif
7739 +#endif
7740 +#endif
7741 +
7742 #endif /* _ASM_X86_MMAN_H */
7743 diff -urNp linux-3.0.7/arch/x86/include/asm/mmu_context.h linux-3.0.7/arch/x86/include/asm/mmu_context.h
7744 --- linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7745 +++ linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7746 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7747
7748 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7749 {
7750 +
7751 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7752 + unsigned int i;
7753 + pgd_t *pgd;
7754 +
7755 + pax_open_kernel();
7756 + pgd = get_cpu_pgd(smp_processor_id());
7757 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7758 + set_pgd_batched(pgd+i, native_make_pgd(0));
7759 + pax_close_kernel();
7760 +#endif
7761 +
7762 #ifdef CONFIG_SMP
7763 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7764 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7765 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7766 struct task_struct *tsk)
7767 {
7768 unsigned cpu = smp_processor_id();
7769 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7770 + int tlbstate = TLBSTATE_OK;
7771 +#endif
7772
7773 if (likely(prev != next)) {
7774 #ifdef CONFIG_SMP
7775 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7776 + tlbstate = percpu_read(cpu_tlbstate.state);
7777 +#endif
7778 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7779 percpu_write(cpu_tlbstate.active_mm, next);
7780 #endif
7781 cpumask_set_cpu(cpu, mm_cpumask(next));
7782
7783 /* Re-load page tables */
7784 +#ifdef CONFIG_PAX_PER_CPU_PGD
7785 + pax_open_kernel();
7786 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7787 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7788 + pax_close_kernel();
7789 + load_cr3(get_cpu_pgd(cpu));
7790 +#else
7791 load_cr3(next->pgd);
7792 +#endif
7793
7794 /* stop flush ipis for the previous mm */
7795 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7796 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7797 */
7798 if (unlikely(prev->context.ldt != next->context.ldt))
7799 load_LDT_nolock(&next->context);
7800 - }
7801 +
7802 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7803 + if (!(__supported_pte_mask & _PAGE_NX)) {
7804 + smp_mb__before_clear_bit();
7805 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7806 + smp_mb__after_clear_bit();
7807 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7808 + }
7809 +#endif
7810 +
7811 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7812 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7813 + prev->context.user_cs_limit != next->context.user_cs_limit))
7814 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7815 #ifdef CONFIG_SMP
7816 + else if (unlikely(tlbstate != TLBSTATE_OK))
7817 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7818 +#endif
7819 +#endif
7820 +
7821 + }
7822 else {
7823 +
7824 +#ifdef CONFIG_PAX_PER_CPU_PGD
7825 + pax_open_kernel();
7826 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7827 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7828 + pax_close_kernel();
7829 + load_cr3(get_cpu_pgd(cpu));
7830 +#endif
7831 +
7832 +#ifdef CONFIG_SMP
7833 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7834 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7835
7836 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7837 * tlb flush IPI delivery. We must reload CR3
7838 * to make sure to use no freed page tables.
7839 */
7840 +
7841 +#ifndef CONFIG_PAX_PER_CPU_PGD
7842 load_cr3(next->pgd);
7843 +#endif
7844 +
7845 load_LDT_nolock(&next->context);
7846 +
7847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7848 + if (!(__supported_pte_mask & _PAGE_NX))
7849 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7850 +#endif
7851 +
7852 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7853 +#ifdef CONFIG_PAX_PAGEEXEC
7854 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7855 +#endif
7856 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7857 +#endif
7858 +
7859 }
7860 - }
7861 #endif
7862 + }
7863 }
7864
7865 #define activate_mm(prev, next) \
7866 diff -urNp linux-3.0.7/arch/x86/include/asm/mmu.h linux-3.0.7/arch/x86/include/asm/mmu.h
7867 --- linux-3.0.7/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7868 +++ linux-3.0.7/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7869 @@ -9,7 +9,7 @@
7870 * we put the segment information here.
7871 */
7872 typedef struct {
7873 - void *ldt;
7874 + struct desc_struct *ldt;
7875 int size;
7876
7877 #ifdef CONFIG_X86_64
7878 @@ -18,7 +18,19 @@ typedef struct {
7879 #endif
7880
7881 struct mutex lock;
7882 - void *vdso;
7883 + unsigned long vdso;
7884 +
7885 +#ifdef CONFIG_X86_32
7886 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7887 + unsigned long user_cs_base;
7888 + unsigned long user_cs_limit;
7889 +
7890 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7891 + cpumask_t cpu_user_cs_mask;
7892 +#endif
7893 +
7894 +#endif
7895 +#endif
7896 } mm_context_t;
7897
7898 #ifdef CONFIG_SMP
7899 diff -urNp linux-3.0.7/arch/x86/include/asm/module.h linux-3.0.7/arch/x86/include/asm/module.h
7900 --- linux-3.0.7/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7901 +++ linux-3.0.7/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
7902 @@ -5,6 +5,7 @@
7903
7904 #ifdef CONFIG_X86_64
7905 /* X86_64 does not define MODULE_PROC_FAMILY */
7906 +#define MODULE_PROC_FAMILY ""
7907 #elif defined CONFIG_M386
7908 #define MODULE_PROC_FAMILY "386 "
7909 #elif defined CONFIG_M486
7910 @@ -59,8 +60,18 @@
7911 #error unknown processor family
7912 #endif
7913
7914 -#ifdef CONFIG_X86_32
7915 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7916 +#ifdef CONFIG_PAX_KERNEXEC
7917 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7918 +#else
7919 +#define MODULE_PAX_KERNEXEC ""
7920 #endif
7921
7922 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7923 +#define MODULE_PAX_UDEREF "UDEREF "
7924 +#else
7925 +#define MODULE_PAX_UDEREF ""
7926 +#endif
7927 +
7928 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7929 +
7930 #endif /* _ASM_X86_MODULE_H */
7931 diff -urNp linux-3.0.7/arch/x86/include/asm/page_64_types.h linux-3.0.7/arch/x86/include/asm/page_64_types.h
7932 --- linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7933 +++ linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7934 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7935
7936 /* duplicated to the one in bootmem.h */
7937 extern unsigned long max_pfn;
7938 -extern unsigned long phys_base;
7939 +extern const unsigned long phys_base;
7940
7941 extern unsigned long __phys_addr(unsigned long);
7942 #define __phys_reloc_hide(x) (x)
7943 diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt.h linux-3.0.7/arch/x86/include/asm/paravirt.h
7944 --- linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7945 +++ linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7946 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7947 val);
7948 }
7949
7950 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7951 +{
7952 + pgdval_t val = native_pgd_val(pgd);
7953 +
7954 + if (sizeof(pgdval_t) > sizeof(long))
7955 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7956 + val, (u64)val >> 32);
7957 + else
7958 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7959 + val);
7960 +}
7961 +
7962 static inline void pgd_clear(pgd_t *pgdp)
7963 {
7964 set_pgd(pgdp, __pgd(0));
7965 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7966 pv_mmu_ops.set_fixmap(idx, phys, flags);
7967 }
7968
7969 +#ifdef CONFIG_PAX_KERNEXEC
7970 +static inline unsigned long pax_open_kernel(void)
7971 +{
7972 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7973 +}
7974 +
7975 +static inline unsigned long pax_close_kernel(void)
7976 +{
7977 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7978 +}
7979 +#else
7980 +static inline unsigned long pax_open_kernel(void) { return 0; }
7981 +static inline unsigned long pax_close_kernel(void) { return 0; }
7982 +#endif
7983 +
7984 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7985
7986 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7987 @@ -955,7 +982,7 @@ extern void default_banner(void);
7988
7989 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7990 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7991 -#define PARA_INDIRECT(addr) *%cs:addr
7992 +#define PARA_INDIRECT(addr) *%ss:addr
7993 #endif
7994
7995 #define INTERRUPT_RETURN \
7996 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7997 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7998 CLBR_NONE, \
7999 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8000 +
8001 +#define GET_CR0_INTO_RDI \
8002 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8003 + mov %rax,%rdi
8004 +
8005 +#define SET_RDI_INTO_CR0 \
8006 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8007 +
8008 +#define GET_CR3_INTO_RDI \
8009 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8010 + mov %rax,%rdi
8011 +
8012 +#define SET_RDI_INTO_CR3 \
8013 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8014 +
8015 #endif /* CONFIG_X86_32 */
8016
8017 #endif /* __ASSEMBLY__ */
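
The paravirt.h hunk routes pax_open_kernel()/pax_close_kernel() through pv_mmu_ops, so a hypervisor backend can supply its own way of making kernel mappings temporarily writable, and callers use the two strictly in matched pairs around a single write. Below is a minimal user-space sketch of that paired open/close discipline going through an ops table; the struct, functions, and the fake write_protect flag are invented for the illustration and do not reproduce the PVOP_* call machinery.

/* Minimal analogue of the open/close pairing: an ops table supplies
 * open_kernel/close_kernel hooks, and writers call them in pairs around
 * the protected store.  All names and the fake "state" value are
 * illustrative only. */
#include <stdio.h>

struct mmu_ops {
        unsigned long (*open_kernel)(void);   /* returns previous state */
        unsigned long (*close_kernel)(void);
};

static unsigned long write_protect = 1;       /* stand-in for CR0.WP */

static unsigned long native_open(void)  { write_protect = 0; return 1; }
static unsigned long native_close(void) { write_protect = 1; return 0; }

static const struct mmu_ops ops = { native_open, native_close };

static int protected_value;

static void update_protected(int v)
{
        ops.open_kernel();          /* drop write protection        */
        protected_value = v;        /* the one store we wanted      */
        ops.close_kernel();         /* restore it before returning  */
}

int main(void)
{
        update_protected(42);
        printf("value=%d wp=%lu\n", protected_value, write_protect);
        return 0;
}
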
8018 diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt_types.h linux-3.0.7/arch/x86/include/asm/paravirt_types.h
8019 --- linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8020 +++ linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8021 @@ -78,19 +78,19 @@ struct pv_init_ops {
8022 */
8023 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8024 unsigned long addr, unsigned len);
8025 -};
8026 +} __no_const;
8027
8028
8029 struct pv_lazy_ops {
8030 /* Set deferred update mode, used for batching operations. */
8031 void (*enter)(void);
8032 void (*leave)(void);
8033 -};
8034 +} __no_const;
8035
8036 struct pv_time_ops {
8037 unsigned long long (*sched_clock)(void);
8038 unsigned long (*get_tsc_khz)(void);
8039 -};
8040 +} __no_const;
8041
8042 struct pv_cpu_ops {
8043 /* hooks for various privileged instructions */
8044 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
8045
8046 void (*start_context_switch)(struct task_struct *prev);
8047 void (*end_context_switch)(struct task_struct *next);
8048 -};
8049 +} __no_const;
8050
8051 struct pv_irq_ops {
8052 /*
8053 @@ -217,7 +217,7 @@ struct pv_apic_ops {
8054 unsigned long start_eip,
8055 unsigned long start_esp);
8056 #endif
8057 -};
8058 +} __no_const;
8059
8060 struct pv_mmu_ops {
8061 unsigned long (*read_cr2)(void);
8062 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
8063 struct paravirt_callee_save make_pud;
8064
8065 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8066 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8067 #endif /* PAGETABLE_LEVELS == 4 */
8068 #endif /* PAGETABLE_LEVELS >= 3 */
8069
8070 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
8071 an mfn. We can tell which is which from the index. */
8072 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8073 phys_addr_t phys, pgprot_t flags);
8074 +
8075 +#ifdef CONFIG_PAX_KERNEXEC
8076 + unsigned long (*pax_open_kernel)(void);
8077 + unsigned long (*pax_close_kernel)(void);
8078 +#endif
8079 +
8080 };
8081
8082 struct arch_spinlock;
8083 @@ -327,7 +334,7 @@ struct pv_lock_ops {
8084 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8085 int (*spin_trylock)(struct arch_spinlock *lock);
8086 void (*spin_unlock)(struct arch_spinlock *lock);
8087 -};
8088 +} __no_const;
8089
8090 /* This contains all the paravirt structures: we get a convenient
8091 * number for each function using the offset which we use to indicate
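
Marking pure function-pointer structures __no_const, as the paravirt_types.h hunk does, reads as an opt-out: tables like pv_cpu_ops are filled in at boot by the chosen backend and so cannot be made const, while most other ops tables can be. The sketch below only illustrates why a const ops table is worth having in the first place; the struct and functions are invented for the demo.

/* Once a table of function pointers is const it lands in .rodata, so
 * hijacking an entry needs a write to read-only memory. */
#include <stdio.h>

struct file_ops {
        int (*open)(const char *name);
        int (*close)(int fd);
};

static int my_open(const char *name) { printf("open %s\n", name); return 3; }
static int my_close(int fd)          { printf("close %d\n", fd);  return 0; }

/* const => read-only after the linker runs */
static const struct file_ops fops = { my_open, my_close };

int main(void)
{
        int fd = fops.open("demo");
        /* fops.open = NULL;     <- rejected by the compiler: assignment
         *                          to a member of a read-only object   */
        return fops.close(fd);
}
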
8092 diff -urNp linux-3.0.7/arch/x86/include/asm/pgalloc.h linux-3.0.7/arch/x86/include/asm/pgalloc.h
8093 --- linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8094 +++ linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8095 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8096 pmd_t *pmd, pte_t *pte)
8097 {
8098 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8099 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8100 +}
8101 +
8102 +static inline void pmd_populate_user(struct mm_struct *mm,
8103 + pmd_t *pmd, pte_t *pte)
8104 +{
8105 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8106 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8107 }
8108
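
The pgalloc.h hunk splits PMD population into a kernel flavour and a user flavour, with the kernel side dropping the user-accessible bit from the entry it installs. A quick stand-alone check of what numerically separates the two, using the usual x86 PTE bit values restated here as assumptions:

/* PRESENT=0x01, RW=0x02, USER=0x04, ACCESSED=0x20, DIRTY=0x40 are the
 * conventional x86 values; treat them as assumptions of this demo. */
#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
        printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE);   /* 0x63 */
        printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);     /* 0x67 */
        printf("difference    = %#lx (_PAGE_USER)\n",
               _PAGE_TABLE ^ _KERNPG_TABLE);
        return 0;
}
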
8109 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-2level.h linux-3.0.7/arch/x86/include/asm/pgtable-2level.h
8110 --- linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8111 +++ linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8112 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8113
8114 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8115 {
8116 + pax_open_kernel();
8117 *pmdp = pmd;
8118 + pax_close_kernel();
8119 }
8120
8121 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8122 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32.h linux-3.0.7/arch/x86/include/asm/pgtable_32.h
8123 --- linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8124 +++ linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8125 @@ -25,9 +25,6 @@
8126 struct mm_struct;
8127 struct vm_area_struct;
8128
8129 -extern pgd_t swapper_pg_dir[1024];
8130 -extern pgd_t initial_page_table[1024];
8131 -
8132 static inline void pgtable_cache_init(void) { }
8133 static inline void check_pgt_cache(void) { }
8134 void paging_init(void);
8135 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8136 # include <asm/pgtable-2level.h>
8137 #endif
8138
8139 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8140 +extern pgd_t initial_page_table[PTRS_PER_PGD];
8141 +#ifdef CONFIG_X86_PAE
8142 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8143 +#endif
8144 +
8145 #if defined(CONFIG_HIGHPTE)
8146 #define pte_offset_map(dir, address) \
8147 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8148 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8149 /* Clear a kernel PTE and flush it from the TLB */
8150 #define kpte_clear_flush(ptep, vaddr) \
8151 do { \
8152 + pax_open_kernel(); \
8153 pte_clear(&init_mm, (vaddr), (ptep)); \
8154 + pax_close_kernel(); \
8155 __flush_tlb_one((vaddr)); \
8156 } while (0)
8157
8158 @@ -74,6 +79,9 @@ do { \
8159
8160 #endif /* !__ASSEMBLY__ */
8161
8162 +#define HAVE_ARCH_UNMAPPED_AREA
8163 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8164 +
8165 /*
8166 * kern_addr_valid() is (1) for FLATMEM and (0) for
8167 * SPARSEMEM and DISCONTIGMEM
8168 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h
8169 --- linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8170 +++ linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8171 @@ -8,7 +8,7 @@
8172 */
8173 #ifdef CONFIG_X86_PAE
8174 # include <asm/pgtable-3level_types.h>
8175 -# define PMD_SIZE (1UL << PMD_SHIFT)
8176 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8177 # define PMD_MASK (~(PMD_SIZE - 1))
8178 #else
8179 # include <asm/pgtable-2level_types.h>
8180 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8181 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8182 #endif
8183
8184 +#ifdef CONFIG_PAX_KERNEXEC
8185 +#ifndef __ASSEMBLY__
8186 +extern unsigned char MODULES_EXEC_VADDR[];
8187 +extern unsigned char MODULES_EXEC_END[];
8188 +#endif
8189 +#include <asm/boot.h>
8190 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8191 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8192 +#else
8193 +#define ktla_ktva(addr) (addr)
8194 +#define ktva_ktla(addr) (addr)
8195 +#endif
8196 +
8197 #define MODULES_VADDR VMALLOC_START
8198 #define MODULES_END VMALLOC_END
8199 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
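
ktla_ktva()/ktva_ktla() in the pgtable_32_types.h hunk shift an address by LOAD_PHYSICAL_ADDR + PAGE_OFFSET in opposite directions, translating kernel text addresses between their two aliases under KERNEXEC. The round-trip sketch below only replays that arithmetic, with typical i386 defaults assumed for the two constants:

/* LOAD_PHYSICAL_ADDR = 16 MiB and PAGE_OFFSET = 3 GiB are assumed
 * defaults for the demo, not values taken from this patch. */
#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL
#define PAGE_OFFSET        0xc0000000UL

static unsigned long ktla_ktva(unsigned long addr)
{
        return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET;
}

static unsigned long ktva_ktla(unsigned long addr)
{
        return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET;
}

int main(void)
{
        unsigned long text = 0x00400000UL;      /* arbitrary text address */

        assert(ktva_ktla(ktla_ktva(text)) == text);
        printf("%#lx <-> %#lx\n", text, ktla_ktva(text));
        return 0;
}
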
8200 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-3level.h linux-3.0.7/arch/x86/include/asm/pgtable-3level.h
8201 --- linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8202 +++ linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8203 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8204
8205 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8206 {
8207 + pax_open_kernel();
8208 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8209 + pax_close_kernel();
8210 }
8211
8212 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8213 {
8214 + pax_open_kernel();
8215 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8216 + pax_close_kernel();
8217 }
8218
8219 /*
8220 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64.h linux-3.0.7/arch/x86/include/asm/pgtable_64.h
8221 --- linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8222 +++ linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8223 @@ -16,10 +16,13 @@
8224
8225 extern pud_t level3_kernel_pgt[512];
8226 extern pud_t level3_ident_pgt[512];
8227 +extern pud_t level3_vmalloc_pgt[512];
8228 +extern pud_t level3_vmemmap_pgt[512];
8229 +extern pud_t level2_vmemmap_pgt[512];
8230 extern pmd_t level2_kernel_pgt[512];
8231 extern pmd_t level2_fixmap_pgt[512];
8232 -extern pmd_t level2_ident_pgt[512];
8233 -extern pgd_t init_level4_pgt[];
8234 +extern pmd_t level2_ident_pgt[512*2];
8235 +extern pgd_t init_level4_pgt[512];
8236
8237 #define swapper_pg_dir init_level4_pgt
8238
8239 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8240
8241 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8242 {
8243 + pax_open_kernel();
8244 *pmdp = pmd;
8245 + pax_close_kernel();
8246 }
8247
8248 static inline void native_pmd_clear(pmd_t *pmd)
8249 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8250
8251 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8252 {
8253 + pax_open_kernel();
8254 + *pgdp = pgd;
8255 + pax_close_kernel();
8256 +}
8257 +
8258 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8259 +{
8260 *pgdp = pgd;
8261 }
8262
8263 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h
8264 --- linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8265 +++ linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8266 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8267 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8268 #define MODULES_END _AC(0xffffffffff000000, UL)
8269 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8270 +#define MODULES_EXEC_VADDR MODULES_VADDR
8271 +#define MODULES_EXEC_END MODULES_END
8272 +
8273 +#define ktla_ktva(addr) (addr)
8274 +#define ktva_ktla(addr) (addr)
8275
8276 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8277 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable.h linux-3.0.7/arch/x86/include/asm/pgtable.h
8278 --- linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8279 +++ linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8280 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8281
8282 #ifndef __PAGETABLE_PUD_FOLDED
8283 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8284 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8285 #define pgd_clear(pgd) native_pgd_clear(pgd)
8286 #endif
8287
8288 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8289
8290 #define arch_end_context_switch(prev) do {} while(0)
8291
8292 +#define pax_open_kernel() native_pax_open_kernel()
8293 +#define pax_close_kernel() native_pax_close_kernel()
8294 #endif /* CONFIG_PARAVIRT */
8295
8296 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8297 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8298 +
8299 +#ifdef CONFIG_PAX_KERNEXEC
8300 +static inline unsigned long native_pax_open_kernel(void)
8301 +{
8302 + unsigned long cr0;
8303 +
8304 + preempt_disable();
8305 + barrier();
8306 + cr0 = read_cr0() ^ X86_CR0_WP;
8307 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8308 + write_cr0(cr0);
8309 + return cr0 ^ X86_CR0_WP;
8310 +}
8311 +
8312 +static inline unsigned long native_pax_close_kernel(void)
8313 +{
8314 + unsigned long cr0;
8315 +
8316 + cr0 = read_cr0() ^ X86_CR0_WP;
8317 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8318 + write_cr0(cr0);
8319 + barrier();
8320 + preempt_enable_no_resched();
8321 + return cr0 ^ X86_CR0_WP;
8322 +}
8323 +#else
8324 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8325 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8326 +#endif
8327 +
8328 /*
8329 * The following only work if pte_present() is true.
8330 * Undefined behaviour if not..
8331 */
8332 +static inline int pte_user(pte_t pte)
8333 +{
8334 + return pte_val(pte) & _PAGE_USER;
8335 +}
8336 +
8337 static inline int pte_dirty(pte_t pte)
8338 {
8339 return pte_flags(pte) & _PAGE_DIRTY;
8340 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8341 return pte_clear_flags(pte, _PAGE_RW);
8342 }
8343
8344 +static inline pte_t pte_mkread(pte_t pte)
8345 +{
8346 + return __pte(pte_val(pte) | _PAGE_USER);
8347 +}
8348 +
8349 static inline pte_t pte_mkexec(pte_t pte)
8350 {
8351 - return pte_clear_flags(pte, _PAGE_NX);
8352 +#ifdef CONFIG_X86_PAE
8353 + if (__supported_pte_mask & _PAGE_NX)
8354 + return pte_clear_flags(pte, _PAGE_NX);
8355 + else
8356 +#endif
8357 + return pte_set_flags(pte, _PAGE_USER);
8358 +}
8359 +
8360 +static inline pte_t pte_exprotect(pte_t pte)
8361 +{
8362 +#ifdef CONFIG_X86_PAE
8363 + if (__supported_pte_mask & _PAGE_NX)
8364 + return pte_set_flags(pte, _PAGE_NX);
8365 + else
8366 +#endif
8367 + return pte_clear_flags(pte, _PAGE_USER);
8368 }
8369
8370 static inline pte_t pte_mkdirty(pte_t pte)
8371 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8372 #endif
8373
8374 #ifndef __ASSEMBLY__
8375 +
8376 +#ifdef CONFIG_PAX_PER_CPU_PGD
8377 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8378 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8379 +{
8380 + return cpu_pgd[cpu];
8381 +}
8382 +#endif
8383 +
8384 #include <linux/mm_types.h>
8385
8386 static inline int pte_none(pte_t pte)
8387 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8388
8389 static inline int pgd_bad(pgd_t pgd)
8390 {
8391 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8392 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8393 }
8394
8395 static inline int pgd_none(pgd_t pgd)
8396 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8397 * pgd_offset() returns a (pgd_t *)
8398 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8399 */
8400 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8401 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8402 +
8403 +#ifdef CONFIG_PAX_PER_CPU_PGD
8404 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8405 +#endif
8406 +
8407 /*
8408 * a shortcut which implies the use of the kernel's pgd, instead
8409 * of a process's
8410 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8411 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8412 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8413
8414 +#ifdef CONFIG_X86_32
8415 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8416 +#else
8417 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8418 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8419 +
8420 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8421 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8422 +#else
8423 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8424 +#endif
8425 +
8426 +#endif
8427 +
8428 #ifndef __ASSEMBLY__
8429
8430 extern int direct_gbpages;
8431 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8432 * dst and src can be on the same page, but the range must not overlap,
8433 * and must not cross a page boundary.
8434 */
8435 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8436 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8437 {
8438 - memcpy(dst, src, count * sizeof(pgd_t));
8439 + pax_open_kernel();
8440 + while (count--)
8441 + *dst++ = *src++;
8442 + pax_close_kernel();
8443 }
8444
8445 +#ifdef CONFIG_PAX_PER_CPU_PGD
8446 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8447 +#endif
8448 +
8449 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8450 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8451 +#else
8452 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8453 +#endif
8454
8455 #include <asm-generic/pgtable.h>
8456 #endif /* __ASSEMBLY__ */
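
native_pax_open_kernel()/native_pax_close_kernel() in the pgtable.h hunk toggle CR0.WP with preemption disabled so that a short sequence such as clone_pgd_range() can write through otherwise read-only mappings. Below is a user-space analogue of that open/write/close pattern, with mprotect() standing in for the WP toggle; it is an analogy only, not the kernel mechanism.

/* Flip protection on, do the store, flip it back. */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(p != MAP_FAILED);

        strcpy(p, "initial");
        assert(mprotect(p, page, PROT_READ) == 0);               /* read-only */

        assert(mprotect(p, page, PROT_READ | PROT_WRITE) == 0);  /* "open"  */
        strcpy(p, "patched");                                    /* write   */
        assert(mprotect(p, page, PROT_READ) == 0);               /* "close" */

        puts(p);
        munmap(p, page);
        return 0;
}
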
8457 diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_types.h linux-3.0.7/arch/x86/include/asm/pgtable_types.h
8458 --- linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8459 +++ linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8460 @@ -16,13 +16,12 @@
8461 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8462 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8463 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8464 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8465 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8466 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8467 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8468 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8469 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8470 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8471 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8472 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8473 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8474 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8475
8476 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8477 @@ -40,7 +39,6 @@
8478 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8479 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8480 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8481 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8482 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8483 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8484 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8485 @@ -57,8 +55,10 @@
8486
8487 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8488 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8489 -#else
8490 +#elif defined(CONFIG_KMEMCHECK)
8491 #define _PAGE_NX (_AT(pteval_t, 0))
8492 +#else
8493 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8494 #endif
8495
8496 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8497 @@ -96,6 +96,9 @@
8498 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8499 _PAGE_ACCESSED)
8500
8501 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8502 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8503 +
8504 #define __PAGE_KERNEL_EXEC \
8505 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8506 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8507 @@ -106,8 +109,8 @@
8508 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8509 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8510 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8511 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8512 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8513 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8514 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8515 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8516 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8517 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8518 @@ -166,8 +169,8 @@
8519 * bits are combined, this will allow user to access the high address mapped
8520 * VDSO in the presence of CONFIG_COMPAT_VDSO
8521 */
8522 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8523 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8524 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8525 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8526 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8527 #endif
8528
8529 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8530 {
8531 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8532 }
8533 +#endif
8534
8535 +#if PAGETABLE_LEVELS == 3
8536 +#include <asm-generic/pgtable-nopud.h>
8537 +#endif
8538 +
8539 +#if PAGETABLE_LEVELS == 2
8540 +#include <asm-generic/pgtable-nopmd.h>
8541 +#endif
8542 +
8543 +#ifndef __ASSEMBLY__
8544 #if PAGETABLE_LEVELS > 3
8545 typedef struct { pudval_t pud; } pud_t;
8546
8547 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8548 return pud.pud;
8549 }
8550 #else
8551 -#include <asm-generic/pgtable-nopud.h>
8552 -
8553 static inline pudval_t native_pud_val(pud_t pud)
8554 {
8555 return native_pgd_val(pud.pgd);
8556 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8557 return pmd.pmd;
8558 }
8559 #else
8560 -#include <asm-generic/pgtable-nopmd.h>
8561 -
8562 static inline pmdval_t native_pmd_val(pmd_t pmd)
8563 {
8564 return native_pgd_val(pmd.pud.pgd);
8565 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8566
8567 extern pteval_t __supported_pte_mask;
8568 extern void set_nx(void);
8569 -extern int nx_enabled;
8570
8571 #define pgprot_writecombine pgprot_writecombine
8572 extern pgprot_t pgprot_writecombine(pgprot_t prot);

8573 diff -urNp linux-3.0.7/arch/x86/include/asm/processor.h linux-3.0.7/arch/x86/include/asm/processor.h
8574 --- linux-3.0.7/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8575 +++ linux-3.0.7/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8576 @@ -266,7 +266,7 @@ struct tss_struct {
8577
8578 } ____cacheline_aligned;
8579
8580 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8581 +extern struct tss_struct init_tss[NR_CPUS];
8582
8583 /*
8584 * Save the original ist values for checking stack pointers during debugging
8585 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8586 */
8587 #define TASK_SIZE PAGE_OFFSET
8588 #define TASK_SIZE_MAX TASK_SIZE
8589 +
8590 +#ifdef CONFIG_PAX_SEGMEXEC
8591 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8592 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8593 +#else
8594 #define STACK_TOP TASK_SIZE
8595 -#define STACK_TOP_MAX STACK_TOP
8596 +#endif
8597 +
8598 +#define STACK_TOP_MAX TASK_SIZE
8599
8600 #define INIT_THREAD { \
8601 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8602 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8603 .vm86_info = NULL, \
8604 .sysenter_cs = __KERNEL_CS, \
8605 .io_bitmap_ptr = NULL, \
8606 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8607 */
8608 #define INIT_TSS { \
8609 .x86_tss = { \
8610 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8611 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8612 .ss0 = __KERNEL_DS, \
8613 .ss1 = __KERNEL_CS, \
8614 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8615 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8616 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8617
8618 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8619 -#define KSTK_TOP(info) \
8620 -({ \
8621 - unsigned long *__ptr = (unsigned long *)(info); \
8622 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8623 -})
8624 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8625
8626 /*
8627 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8628 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8629 #define task_pt_regs(task) \
8630 ({ \
8631 struct pt_regs *__regs__; \
8632 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8633 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8634 __regs__ - 1; \
8635 })
8636
8637 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8638 /*
8639 * User space process size. 47bits minus one guard page.
8640 */
8641 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8642 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8643
8644 /* This decides where the kernel will search for a free chunk of vm
8645 * space during mmap's.
8646 */
8647 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8648 - 0xc0000000 : 0xFFFFe000)
8649 + 0xc0000000 : 0xFFFFf000)
8650
8651 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8652 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8653 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8654 #define STACK_TOP_MAX TASK_SIZE_MAX
8655
8656 #define INIT_THREAD { \
8657 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8658 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8659 }
8660
8661 #define INIT_TSS { \
8662 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8663 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8664 }
8665
8666 /*
8667 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8668 */
8669 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8670
8671 +#ifdef CONFIG_PAX_SEGMEXEC
8672 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8673 +#endif
8674 +
8675 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8676
8677 /* Get/set a process' ability to use the timestamp counter instruction */
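
With SEGMEXEC the processor.h hunk halves the 32-bit user address space, so STACK_TOP and the mmap base for flagged tasks derive from SEGMEXEC_TASK_SIZE = TASK_SIZE / 2. The sketch below just recomputes those derived limits under the common 3 GiB/1 GiB split, which is assumed rather than taken from a particular .config:

/* PAGE_OFFSET = 0xc0000000 and 4 KiB pages are assumptions of the demo. */
#include <stdio.h>

#define PAGE_OFFSET         0xc0000000UL
#define TASK_SIZE           PAGE_OFFSET
#define SEGMEXEC_TASK_SIZE  (TASK_SIZE / 2)

#define PAGE_ALIGN(x)       (((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
        printf("TASK_SIZE                   = %#lx\n", TASK_SIZE);
        printf("SEGMEXEC_TASK_SIZE          = %#lx\n", SEGMEXEC_TASK_SIZE);
        printf("TASK_UNMAPPED_BASE          = %#lx\n",
               PAGE_ALIGN(TASK_SIZE / 3));
        printf("SEGMEXEC_TASK_UNMAPPED_BASE = %#lx\n",
               PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3));
        return 0;
}
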
8678 diff -urNp linux-3.0.7/arch/x86/include/asm/ptrace.h linux-3.0.7/arch/x86/include/asm/ptrace.h
8679 --- linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8680 +++ linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8681 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8682 }
8683
8684 /*
8685 - * user_mode_vm(regs) determines whether a register set came from user mode.
8686 + * user_mode(regs) determines whether a register set came from user mode.
8687 * This is true if V8086 mode was enabled OR if the register set was from
8688 * protected mode with RPL-3 CS value. This tricky test checks that with
8689 * one comparison. Many places in the kernel can bypass this full check
8690 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8691 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8692 + * be used.
8693 */
8694 -static inline int user_mode(struct pt_regs *regs)
8695 +static inline int user_mode_novm(struct pt_regs *regs)
8696 {
8697 #ifdef CONFIG_X86_32
8698 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8699 #else
8700 - return !!(regs->cs & 3);
8701 + return !!(regs->cs & SEGMENT_RPL_MASK);
8702 #endif
8703 }
8704
8705 -static inline int user_mode_vm(struct pt_regs *regs)
8706 +static inline int user_mode(struct pt_regs *regs)
8707 {
8708 #ifdef CONFIG_X86_32
8709 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8710 USER_RPL;
8711 #else
8712 - return user_mode(regs);
8713 + return user_mode_novm(regs);
8714 #endif
8715 }
8716
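
The ptrace.h hunk renames user_mode_vm() to user_mode() (and the RPL-only variant to user_mode_novm()), making the VM86-aware test the default. The combined check is short enough to restate as a stand-alone function; the constants are the usual x86 values and are assumptions of the demo:

/* RPL mask 3, user RPL 3, EFLAGS.VM at bit 17. */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)     /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long flags)
{
        /* true for RPL-3 code segments and for anything running in vm86 */
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("kernel cs=0x10         -> %d\n", user_mode(0x10, 0));
        printf("user   cs=0x73         -> %d\n", user_mode(0x73, 0));
        printf("vm86   cs=0x00, VM set -> %d\n", user_mode(0x00, X86_VM_MASK));
        return 0;
}
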
8717 diff -urNp linux-3.0.7/arch/x86/include/asm/reboot.h linux-3.0.7/arch/x86/include/asm/reboot.h
8718 --- linux-3.0.7/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8719 +++ linux-3.0.7/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8720 @@ -6,19 +6,19 @@
8721 struct pt_regs;
8722
8723 struct machine_ops {
8724 - void (*restart)(char *cmd);
8725 - void (*halt)(void);
8726 - void (*power_off)(void);
8727 + void (* __noreturn restart)(char *cmd);
8728 + void (* __noreturn halt)(void);
8729 + void (* __noreturn power_off)(void);
8730 void (*shutdown)(void);
8731 void (*crash_shutdown)(struct pt_regs *);
8732 - void (*emergency_restart)(void);
8733 -};
8734 + void (* __noreturn emergency_restart)(void);
8735 +} __no_const;
8736
8737 extern struct machine_ops machine_ops;
8738
8739 void native_machine_crash_shutdown(struct pt_regs *regs);
8740 void native_machine_shutdown(void);
8741 -void machine_real_restart(unsigned int type);
8742 +void machine_real_restart(unsigned int type) __noreturn;
8743 /* These must match dispatch_table in reboot_32.S */
8744 #define MRR_BIOS 0
8745 #define MRR_APM 1
8746 diff -urNp linux-3.0.7/arch/x86/include/asm/rwsem.h linux-3.0.7/arch/x86/include/asm/rwsem.h
8747 --- linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8748 +++ linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8749 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8750 {
8751 asm volatile("# beginning down_read\n\t"
8752 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8753 +
8754 +#ifdef CONFIG_PAX_REFCOUNT
8755 + "jno 0f\n"
8756 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8757 + "int $4\n0:\n"
8758 + _ASM_EXTABLE(0b, 0b)
8759 +#endif
8760 +
8761 /* adds 0x00000001 */
8762 " jns 1f\n"
8763 " call call_rwsem_down_read_failed\n"
8764 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8765 "1:\n\t"
8766 " mov %1,%2\n\t"
8767 " add %3,%2\n\t"
8768 +
8769 +#ifdef CONFIG_PAX_REFCOUNT
8770 + "jno 0f\n"
8771 + "sub %3,%2\n"
8772 + "int $4\n0:\n"
8773 + _ASM_EXTABLE(0b, 0b)
8774 +#endif
8775 +
8776 " jle 2f\n\t"
8777 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8778 " jnz 1b\n\t"
8779 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8780 long tmp;
8781 asm volatile("# beginning down_write\n\t"
8782 LOCK_PREFIX " xadd %1,(%2)\n\t"
8783 +
8784 +#ifdef CONFIG_PAX_REFCOUNT
8785 + "jno 0f\n"
8786 + "mov %1,(%2)\n"
8787 + "int $4\n0:\n"
8788 + _ASM_EXTABLE(0b, 0b)
8789 +#endif
8790 +
8791 /* adds 0xffff0001, returns the old value */
8792 " test %1,%1\n\t"
8793 /* was the count 0 before? */
8794 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8795 long tmp;
8796 asm volatile("# beginning __up_read\n\t"
8797 LOCK_PREFIX " xadd %1,(%2)\n\t"
8798 +
8799 +#ifdef CONFIG_PAX_REFCOUNT
8800 + "jno 0f\n"
8801 + "mov %1,(%2)\n"
8802 + "int $4\n0:\n"
8803 + _ASM_EXTABLE(0b, 0b)
8804 +#endif
8805 +
8806 /* subtracts 1, returns the old value */
8807 " jns 1f\n\t"
8808 " call call_rwsem_wake\n" /* expects old value in %edx */
8809 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8810 long tmp;
8811 asm volatile("# beginning __up_write\n\t"
8812 LOCK_PREFIX " xadd %1,(%2)\n\t"
8813 +
8814 +#ifdef CONFIG_PAX_REFCOUNT
8815 + "jno 0f\n"
8816 + "mov %1,(%2)\n"
8817 + "int $4\n0:\n"
8818 + _ASM_EXTABLE(0b, 0b)
8819 +#endif
8820 +
8821 /* subtracts 0xffff0001, returns the old value */
8822 " jns 1f\n\t"
8823 " call call_rwsem_wake\n" /* expects old value in %edx */
8824 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8825 {
8826 asm volatile("# beginning __downgrade_write\n\t"
8827 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8828 +
8829 +#ifdef CONFIG_PAX_REFCOUNT
8830 + "jno 0f\n"
8831 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8832 + "int $4\n0:\n"
8833 + _ASM_EXTABLE(0b, 0b)
8834 +#endif
8835 +
8836 /*
8837 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8838 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8839 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8840 */
8841 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8842 {
8843 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8844 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8845 +
8846 +#ifdef CONFIG_PAX_REFCOUNT
8847 + "jno 0f\n"
8848 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8849 + "int $4\n0:\n"
8850 + _ASM_EXTABLE(0b, 0b)
8851 +#endif
8852 +
8853 : "+m" (sem->count)
8854 : "er" (delta));
8855 }
8856 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8857 {
8858 long tmp = delta;
8859
8860 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8861 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8862 +
8863 +#ifdef CONFIG_PAX_REFCOUNT
8864 + "jno 0f\n"
8865 + "mov %0,%1\n"
8866 + "int $4\n0:\n"
8867 + _ASM_EXTABLE(0b, 0b)
8868 +#endif
8869 +
8870 : "+r" (tmp), "+m" (sem->count)
8871 : : "memory");
8872
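
Each PAX_REFCOUNT hunk in rwsem.h follows the same shape: perform the locked arithmetic, skip ahead with jno if the signed result did not overflow, otherwise undo the operation and raise int $4, which the accompanying exception-table entry resumes past. A portable C analogue of that detect-and-refuse logic follows; it commits the addition only when it stays in range, mirroring the net effect rather than the instruction sequence.

/* Compute the new counter value, commit it only if it stayed within the
 * signed range, and complain otherwise.  Requires the GCC/Clang
 * __builtin_add_overflow() builtin. */
#include <limits.h>
#include <stdio.h>

static long counter = LONG_MAX - 1;

static int counter_add(long delta)
{
        long next;

        if (__builtin_add_overflow(counter, delta, &next)) {
                /* overflow detected: leave the counter untouched */
                fprintf(stderr, "refcount overflow blocked\n");
                return -1;
        }
        counter = next;
        return 0;
}

int main(void)
{
        counter_add(1);          /* ok: reaches LONG_MAX       */
        counter_add(1);          /* blocked: would wrap around */
        printf("counter = %ld\n", counter);
        return 0;
}
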
8873 diff -urNp linux-3.0.7/arch/x86/include/asm/segment.h linux-3.0.7/arch/x86/include/asm/segment.h
8874 --- linux-3.0.7/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8875 +++ linux-3.0.7/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8876 @@ -64,10 +64,15 @@
8877 * 26 - ESPFIX small SS
8878 * 27 - per-cpu [ offset to per-cpu data area ]
8879 * 28 - stack_canary-20 [ for stack protector ]
8880 - * 29 - unused
8881 - * 30 - unused
8882 + * 29 - PCI BIOS CS
8883 + * 30 - PCI BIOS DS
8884 * 31 - TSS for double fault handler
8885 */
8886 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8887 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8888 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8889 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8890 +
8891 #define GDT_ENTRY_TLS_MIN 6
8892 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8893
8894 @@ -79,6 +84,8 @@
8895
8896 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8897
8898 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8899 +
8900 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8901
8902 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8903 @@ -104,6 +111,12 @@
8904 #define __KERNEL_STACK_CANARY 0
8905 #endif
8906
8907 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8908 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8909 +
8910 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8911 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8912 +
8913 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8914
8915 /*
8916 @@ -141,7 +154,7 @@
8917 */
8918
8919 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8920 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8921 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8922
8923
8924 #else
8925 @@ -165,6 +178,8 @@
8926 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8927 #define __USER32_DS __USER_DS
8928
8929 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8930 +
8931 #define GDT_ENTRY_TSS 8 /* needs two entries */
8932 #define GDT_ENTRY_LDT 10 /* needs two entries */
8933 #define GDT_ENTRY_TLS_MIN 12
8934 @@ -185,6 +200,7 @@
8935 #endif
8936
8937 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8938 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8939 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8940 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8941 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
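
The segment.h hunk adds GDT slots (KERNEXEC EFI and kernel code segments, PCI BIOS code/data) and derives their selectors with the usual index-times-eight encoding. The sketch below reproduces that arithmetic; GDT_ENTRY_KERNEL_BASE = 12 is the stock i386 value and is assumed here rather than quoted from this patch:

/* A selector is the descriptor index shifted left by three with the RPL
 * in the low two bits. */
#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE        12
#define GDT_ENTRY_KERNEXEC_EFI_CS     1
#define GDT_ENTRY_KERNEXEC_KERNEL_CS  4
#define GDT_ENTRY_PCIBIOS_CS         (GDT_ENTRY_KERNEL_BASE + 17)

#define SELECTOR(idx, rpl) (((idx) << 3) | (rpl))

int main(void)
{
        printf("__KERNEXEC_EFI_CS    = %#x\n",
               SELECTOR(GDT_ENTRY_KERNEXEC_EFI_CS, 0));     /* 0x08 */
        printf("__KERNEXEC_KERNEL_CS = %#x\n",
               SELECTOR(GDT_ENTRY_KERNEXEC_KERNEL_CS, 0));   /* 0x20 */
        printf("__PCIBIOS_CS         = %#x\n",
               SELECTOR(GDT_ENTRY_PCIBIOS_CS, 0));           /* 0xe8 */
        return 0;
}
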
8942 diff -urNp linux-3.0.7/arch/x86/include/asm/smp.h linux-3.0.7/arch/x86/include/asm/smp.h
8943 --- linux-3.0.7/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8944 +++ linux-3.0.7/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8945 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8946 /* cpus sharing the last level cache: */
8947 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8948 DECLARE_PER_CPU(u16, cpu_llc_id);
8949 -DECLARE_PER_CPU(int, cpu_number);
8950 +DECLARE_PER_CPU(unsigned int, cpu_number);
8951
8952 static inline struct cpumask *cpu_sibling_mask(int cpu)
8953 {
8954 @@ -77,7 +77,7 @@ struct smp_ops {
8955
8956 void (*send_call_func_ipi)(const struct cpumask *mask);
8957 void (*send_call_func_single_ipi)(int cpu);
8958 -};
8959 +} __no_const;
8960
8961 /* Globals due to paravirt */
8962 extern void set_cpu_sibling_map(int cpu);
8963 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8964 extern int safe_smp_processor_id(void);
8965
8966 #elif defined(CONFIG_X86_64_SMP)
8967 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8968 -
8969 -#define stack_smp_processor_id() \
8970 -({ \
8971 - struct thread_info *ti; \
8972 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8973 - ti->cpu; \
8974 -})
8975 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8976 +#define stack_smp_processor_id() raw_smp_processor_id()
8977 #define safe_smp_processor_id() smp_processor_id()
8978
8979 #endif
8980 diff -urNp linux-3.0.7/arch/x86/include/asm/spinlock.h linux-3.0.7/arch/x86/include/asm/spinlock.h
8981 --- linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8982 +++ linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8983 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8984 static inline void arch_read_lock(arch_rwlock_t *rw)
8985 {
8986 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8987 +
8988 +#ifdef CONFIG_PAX_REFCOUNT
8989 + "jno 0f\n"
8990 + LOCK_PREFIX " addl $1,(%0)\n"
8991 + "int $4\n0:\n"
8992 + _ASM_EXTABLE(0b, 0b)
8993 +#endif
8994 +
8995 "jns 1f\n"
8996 "call __read_lock_failed\n\t"
8997 "1:\n"
8998 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8999 static inline void arch_write_lock(arch_rwlock_t *rw)
9000 {
9001 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9002 +
9003 +#ifdef CONFIG_PAX_REFCOUNT
9004 + "jno 0f\n"
9005 + LOCK_PREFIX " addl %1,(%0)\n"
9006 + "int $4\n0:\n"
9007 + _ASM_EXTABLE(0b, 0b)
9008 +#endif
9009 +
9010 "jz 1f\n"
9011 "call __write_lock_failed\n\t"
9012 "1:\n"
9013 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9014
9015 static inline void arch_read_unlock(arch_rwlock_t *rw)
9016 {
9017 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9018 + asm volatile(LOCK_PREFIX "incl %0\n"
9019 +
9020 +#ifdef CONFIG_PAX_REFCOUNT
9021 + "jno 0f\n"
9022 + LOCK_PREFIX "decl %0\n"
9023 + "int $4\n0:\n"
9024 + _ASM_EXTABLE(0b, 0b)
9025 +#endif
9026 +
9027 + :"+m" (rw->lock) : : "memory");
9028 }
9029
9030 static inline void arch_write_unlock(arch_rwlock_t *rw)
9031 {
9032 - asm volatile(LOCK_PREFIX "addl %1, %0"
9033 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9034 +
9035 +#ifdef CONFIG_PAX_REFCOUNT
9036 + "jno 0f\n"
9037 + LOCK_PREFIX "subl %1, %0\n"
9038 + "int $4\n0:\n"
9039 + _ASM_EXTABLE(0b, 0b)
9040 +#endif
9041 +
9042 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9043 }
9044
9045 diff -urNp linux-3.0.7/arch/x86/include/asm/stackprotector.h linux-3.0.7/arch/x86/include/asm/stackprotector.h
9046 --- linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9047 +++ linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9048 @@ -48,7 +48,7 @@
9049 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9050 */
9051 #define GDT_STACK_CANARY_INIT \
9052 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9053 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9054
9055 /*
9056 * Initialize the stackprotector canary value.
9057 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9058
9059 static inline void load_stack_canary_segment(void)
9060 {
9061 -#ifdef CONFIG_X86_32
9062 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9063 asm volatile ("mov %0, %%gs" : : "r" (0));
9064 #endif
9065 }
9066 diff -urNp linux-3.0.7/arch/x86/include/asm/stacktrace.h linux-3.0.7/arch/x86/include/asm/stacktrace.h
9067 --- linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9068 +++ linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9069 @@ -11,28 +11,20 @@
9070
9071 extern int kstack_depth_to_print;
9072
9073 -struct thread_info;
9074 +struct task_struct;
9075 struct stacktrace_ops;
9076
9077 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9078 - unsigned long *stack,
9079 - unsigned long bp,
9080 - const struct stacktrace_ops *ops,
9081 - void *data,
9082 - unsigned long *end,
9083 - int *graph);
9084 -
9085 -extern unsigned long
9086 -print_context_stack(struct thread_info *tinfo,
9087 - unsigned long *stack, unsigned long bp,
9088 - const struct stacktrace_ops *ops, void *data,
9089 - unsigned long *end, int *graph);
9090 -
9091 -extern unsigned long
9092 -print_context_stack_bp(struct thread_info *tinfo,
9093 - unsigned long *stack, unsigned long bp,
9094 - const struct stacktrace_ops *ops, void *data,
9095 - unsigned long *end, int *graph);
9096 +typedef unsigned long walk_stack_t(struct task_struct *task,
9097 + void *stack_start,
9098 + unsigned long *stack,
9099 + unsigned long bp,
9100 + const struct stacktrace_ops *ops,
9101 + void *data,
9102 + unsigned long *end,
9103 + int *graph);
9104 +
9105 +extern walk_stack_t print_context_stack;
9106 +extern walk_stack_t print_context_stack_bp;
9107
9108 /* Generic stack tracer with callbacks */
9109
9110 @@ -40,7 +32,7 @@ struct stacktrace_ops {
9111 void (*address)(void *data, unsigned long address, int reliable);
9112 /* On negative return stop dumping */
9113 int (*stack)(void *data, char *name);
9114 - walk_stack_t walk_stack;
9115 + walk_stack_t *walk_stack;
9116 };
9117
9118 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9119 diff -urNp linux-3.0.7/arch/x86/include/asm/sys_ia32.h linux-3.0.7/arch/x86/include/asm/sys_ia32.h
9120 --- linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9121 +++ linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9122 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9123 compat_sigset_t __user *, unsigned int);
9124 asmlinkage long sys32_alarm(unsigned int);
9125
9126 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9127 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9128 asmlinkage long sys32_sysfs(int, u32, u32);
9129
9130 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9131 diff -urNp linux-3.0.7/arch/x86/include/asm/system.h linux-3.0.7/arch/x86/include/asm/system.h
9132 --- linux-3.0.7/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9133 +++ linux-3.0.7/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9134 @@ -129,7 +129,7 @@ do { \
9135 "call __switch_to\n\t" \
9136 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9137 __switch_canary \
9138 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
9139 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9140 "movq %%rax,%%rdi\n\t" \
9141 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9142 "jnz ret_from_fork\n\t" \
9143 @@ -140,7 +140,7 @@ do { \
9144 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9145 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9146 [_tif_fork] "i" (_TIF_FORK), \
9147 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9148 + [thread_info] "m" (current_tinfo), \
9149 [current_task] "m" (current_task) \
9150 __switch_canary_iparam \
9151 : "memory", "cc" __EXTRA_CLOBBER)
9152 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9153 {
9154 unsigned long __limit;
9155 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9156 - return __limit + 1;
9157 + return __limit;
9158 }
9159
9160 static inline void native_clts(void)
9161 @@ -397,12 +397,12 @@ void enable_hlt(void);
9162
9163 void cpu_idle_wait(void);
9164
9165 -extern unsigned long arch_align_stack(unsigned long sp);
9166 +#define arch_align_stack(x) ((x) & ~0xfUL)
9167 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9168
9169 void default_idle(void);
9170
9171 -void stop_this_cpu(void *dummy);
9172 +void stop_this_cpu(void *dummy) __noreturn;
9173
9174 /*
9175 * Force strict CPU ordering.
9176 diff -urNp linux-3.0.7/arch/x86/include/asm/thread_info.h linux-3.0.7/arch/x86/include/asm/thread_info.h
9177 --- linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9178 +++ linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9179 @@ -10,6 +10,7 @@
9180 #include <linux/compiler.h>
9181 #include <asm/page.h>
9182 #include <asm/types.h>
9183 +#include <asm/percpu.h>
9184
9185 /*
9186 * low level task data that entry.S needs immediate access to
9187 @@ -24,7 +25,6 @@ struct exec_domain;
9188 #include <asm/atomic.h>
9189
9190 struct thread_info {
9191 - struct task_struct *task; /* main task structure */
9192 struct exec_domain *exec_domain; /* execution domain */
9193 __u32 flags; /* low level flags */
9194 __u32 status; /* thread synchronous flags */
9195 @@ -34,18 +34,12 @@ struct thread_info {
9196 mm_segment_t addr_limit;
9197 struct restart_block restart_block;
9198 void __user *sysenter_return;
9199 -#ifdef CONFIG_X86_32
9200 - unsigned long previous_esp; /* ESP of the previous stack in
9201 - case of nested (IRQ) stacks
9202 - */
9203 - __u8 supervisor_stack[0];
9204 -#endif
9205 + unsigned long lowest_stack;
9206 int uaccess_err;
9207 };
9208
9209 -#define INIT_THREAD_INFO(tsk) \
9210 +#define INIT_THREAD_INFO \
9211 { \
9212 - .task = &tsk, \
9213 .exec_domain = &default_exec_domain, \
9214 .flags = 0, \
9215 .cpu = 0, \
9216 @@ -56,7 +50,7 @@ struct thread_info {
9217 }, \
9218 }
9219
9220 -#define init_thread_info (init_thread_union.thread_info)
9221 +#define init_thread_info (init_thread_union.stack)
9222 #define init_stack (init_thread_union.stack)
9223
9224 #else /* !__ASSEMBLY__ */
9225 @@ -170,6 +164,23 @@ struct thread_info {
9226 ret; \
9227 })
9228
9229 +#ifdef __ASSEMBLY__
9230 +/* how to get the thread information struct from ASM */
9231 +#define GET_THREAD_INFO(reg) \
9232 + mov PER_CPU_VAR(current_tinfo), reg
9233 +
9234 +/* use this one if reg already contains %esp */
9235 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9236 +#else
9237 +/* how to get the thread information struct from C */
9238 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9239 +
9240 +static __always_inline struct thread_info *current_thread_info(void)
9241 +{
9242 + return percpu_read_stable(current_tinfo);
9243 +}
9244 +#endif
9245 +
9246 #ifdef CONFIG_X86_32
9247
9248 #define STACK_WARN (THREAD_SIZE/8)
9249 @@ -180,35 +191,13 @@ struct thread_info {
9250 */
9251 #ifndef __ASSEMBLY__
9252
9253 -
9254 /* how to get the current stack pointer from C */
9255 register unsigned long current_stack_pointer asm("esp") __used;
9256
9257 -/* how to get the thread information struct from C */
9258 -static inline struct thread_info *current_thread_info(void)
9259 -{
9260 - return (struct thread_info *)
9261 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9262 -}
9263 -
9264 -#else /* !__ASSEMBLY__ */
9265 -
9266 -/* how to get the thread information struct from ASM */
9267 -#define GET_THREAD_INFO(reg) \
9268 - movl $-THREAD_SIZE, reg; \
9269 - andl %esp, reg
9270 -
9271 -/* use this one if reg already contains %esp */
9272 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9273 - andl $-THREAD_SIZE, reg
9274 -
9275 #endif
9276
9277 #else /* X86_32 */
9278
9279 -#include <asm/percpu.h>
9280 -#define KERNEL_STACK_OFFSET (5*8)
9281 -
9282 /*
9283 * macros/functions for gaining access to the thread information structure
9284 * preempt_count needs to be 1 initially, until the scheduler is functional.
9285 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9286 #ifndef __ASSEMBLY__
9287 DECLARE_PER_CPU(unsigned long, kernel_stack);
9288
9289 -static inline struct thread_info *current_thread_info(void)
9290 -{
9291 - struct thread_info *ti;
9292 - ti = (void *)(percpu_read_stable(kernel_stack) +
9293 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9294 - return ti;
9295 -}
9296 -
9297 -#else /* !__ASSEMBLY__ */
9298 -
9299 -/* how to get the thread information struct from ASM */
9300 -#define GET_THREAD_INFO(reg) \
9301 - movq PER_CPU_VAR(kernel_stack),reg ; \
9302 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9303 -
9304 +/* how to get the current stack pointer from C */
9305 +register unsigned long current_stack_pointer asm("rsp") __used;
9306 #endif
9307
9308 #endif /* !X86_32 */
9309 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9310 extern void free_thread_info(struct thread_info *ti);
9311 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9312 #define arch_task_cache_init arch_task_cache_init
9313 +
9314 +#define __HAVE_THREAD_FUNCTIONS
9315 +#define task_thread_info(task) (&(task)->tinfo)
9316 +#define task_stack_page(task) ((task)->stack)
9317 +#define setup_thread_stack(p, org) do {} while (0)
9318 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9319 +
9320 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9321 +extern struct task_struct *alloc_task_struct_node(int node);
9322 +extern void free_task_struct(struct task_struct *);
9323 +
9324 #endif
9325 #endif /* _ASM_X86_THREAD_INFO_H */
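
The thread_info.h hunk stops deriving current_thread_info() by masking the stack pointer and instead reads a per-CPU current_tinfo pointer, which also lets thread_info drop its embedded task pointer. For contrast, the removed masking trick is shown in isolation below, with THREAD_SIZE assumed to be 8 KiB for the demo:

/* With thread_info at the bottom of a THREAD_SIZE-aligned stack,
 * clearing the low bits of any in-stack address recovers it. */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static uintptr_t thread_info_from_sp(uintptr_t sp)
{
        return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
        uintptr_t stack_base = 0xc1234000UL;      /* pretend allocation */
        uintptr_t sp = stack_base + 0x1e40;       /* somewhere inside   */

        printf("sp=%#lx -> thread_info=%#lx\n",
               (unsigned long)sp,
               (unsigned long)thread_info_from_sp(sp));
        return 0;
}
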
9326 diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_32.h linux-3.0.7/arch/x86/include/asm/uaccess_32.h
9327 --- linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9328 +++ linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9329 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9330 static __always_inline unsigned long __must_check
9331 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9332 {
9333 + pax_track_stack();
9334 +
9335 + if ((long)n < 0)
9336 + return n;
9337 +
9338 if (__builtin_constant_p(n)) {
9339 unsigned long ret;
9340
9341 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9342 return ret;
9343 }
9344 }
9345 + if (!__builtin_constant_p(n))
9346 + check_object_size(from, n, true);
9347 return __copy_to_user_ll(to, from, n);
9348 }
9349
9350 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9351 __copy_to_user(void __user *to, const void *from, unsigned long n)
9352 {
9353 might_fault();
9354 +
9355 return __copy_to_user_inatomic(to, from, n);
9356 }
9357
9358 static __always_inline unsigned long
9359 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9360 {
9361 + if ((long)n < 0)
9362 + return n;
9363 +
9364 /* Avoid zeroing the tail if the copy fails..
9365 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9366 * but as the zeroing behaviour is only significant when n is not
9367 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9368 __copy_from_user(void *to, const void __user *from, unsigned long n)
9369 {
9370 might_fault();
9371 +
9372 + pax_track_stack();
9373 +
9374 + if ((long)n < 0)
9375 + return n;
9376 +
9377 if (__builtin_constant_p(n)) {
9378 unsigned long ret;
9379
9380 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9381 return ret;
9382 }
9383 }
9384 + if (!__builtin_constant_p(n))
9385 + check_object_size(to, n, false);
9386 return __copy_from_user_ll(to, from, n);
9387 }
9388
9389 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9390 const void __user *from, unsigned long n)
9391 {
9392 might_fault();
9393 +
9394 + if ((long)n < 0)
9395 + return n;
9396 +
9397 if (__builtin_constant_p(n)) {
9398 unsigned long ret;
9399
9400 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9401 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9402 unsigned long n)
9403 {
9404 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9405 -}
9406 + if ((long)n < 0)
9407 + return n;
9408
9409 -unsigned long __must_check copy_to_user(void __user *to,
9410 - const void *from, unsigned long n);
9411 -unsigned long __must_check _copy_from_user(void *to,
9412 - const void __user *from,
9413 - unsigned long n);
9414 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9415 +}
9416
9417 +extern void copy_to_user_overflow(void)
9418 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9419 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9420 +#else
9421 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9422 +#endif
9423 +;
9424
9425 extern void copy_from_user_overflow(void)
9426 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9427 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9428 #endif
9429 ;
9430
9431 -static inline unsigned long __must_check copy_from_user(void *to,
9432 - const void __user *from,
9433 - unsigned long n)
9434 +/**
9435 + * copy_to_user: - Copy a block of data into user space.
9436 + * @to: Destination address, in user space.
9437 + * @from: Source address, in kernel space.
9438 + * @n: Number of bytes to copy.
9439 + *
9440 + * Context: User context only. This function may sleep.
9441 + *
9442 + * Copy data from kernel space to user space.
9443 + *
9444 + * Returns number of bytes that could not be copied.
9445 + * On success, this will be zero.
9446 + */
9447 +static inline unsigned long __must_check
9448 +copy_to_user(void __user *to, const void *from, unsigned long n)
9449 +{
9450 + int sz = __compiletime_object_size(from);
9451 +
9452 + if (unlikely(sz != -1 && sz < n))
9453 + copy_to_user_overflow();
9454 + else if (access_ok(VERIFY_WRITE, to, n))
9455 + n = __copy_to_user(to, from, n);
9456 + return n;
9457 +}
9458 +
9459 +/**
9460 + * copy_from_user: - Copy a block of data from user space.
9461 + * @to: Destination address, in kernel space.
9462 + * @from: Source address, in user space.
9463 + * @n: Number of bytes to copy.
9464 + *
9465 + * Context: User context only. This function may sleep.
9466 + *
9467 + * Copy data from user space to kernel space.
9468 + *
9469 + * Returns number of bytes that could not be copied.
9470 + * On success, this will be zero.
9471 + *
9472 + * If some data could not be copied, this function will pad the copied
9473 + * data to the requested size using zero bytes.
9474 + */
9475 +static inline unsigned long __must_check
9476 +copy_from_user(void *to, const void __user *from, unsigned long n)
9477 {
9478 int sz = __compiletime_object_size(to);
9479
9480 - if (likely(sz == -1 || sz >= n))
9481 - n = _copy_from_user(to, from, n);
9482 - else
9483 + if (unlikely(sz != -1 && sz < n))
9484 copy_from_user_overflow();
9485 -
9486 + else if (access_ok(VERIFY_READ, from, n))
9487 + n = __copy_from_user(to, from, n);
9488 + else if ((long)n > 0) {
9489 + if (!__builtin_constant_p(n))
9490 + check_object_size(to, n, false);
9491 + memset(to, 0, n);
9492 + }
9493 return n;
9494 }
9495
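
The reworked copy_from_user() in the uaccess_32.h hunk layers three defenses: a compile-time destination-size check, the access_ok() range check, and zeroing of the kernel buffer when the user range is rejected so that stale kernel bytes never reach the caller. A stand-alone sketch of that control flow follows; access_ok()/raw_copy() here are stubs invented for the demo, not the kernel helpers.

/* Reject copies larger than the known destination, fall back to a
 * runtime range check, and zero the destination when the source range
 * is rejected. */
#include <stdio.h>
#include <string.h>

static int access_ok(const void *uptr, size_t n)
{
        (void)n;
        return uptr != NULL;            /* stand-in for the real range check */
}

static size_t raw_copy(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;                       /* bytes NOT copied */
}

static size_t checked_copy(void *dst, size_t dst_size,
                           const void *src, size_t n)
{
        if (n > dst_size) {
                fprintf(stderr, "copy overflows destination\n");
                return n;
        }
        if (access_ok(src, n))
                return raw_copy(dst, src, n);

        memset(dst, 0, n);              /* bad source: don't leak old bytes */
        return n;
}

int main(void)
{
        char buf[8] = "XXXXXXX";

        checked_copy(buf, sizeof(buf), NULL, sizeof(buf));   /* zeroed */
        printf("buf[0]=%d\n", buf[0]);                       /* prints 0 */

        checked_copy(buf, sizeof(buf), "hello", 6);          /* copied */
        printf("buf=%s\n", buf);
        return 0;
}
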
9496 diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_64.h linux-3.0.7/arch/x86/include/asm/uaccess_64.h
9497 --- linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9498 +++ linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9499 @@ -10,6 +10,9 @@
9500 #include <asm/alternative.h>
9501 #include <asm/cpufeature.h>
9502 #include <asm/page.h>
9503 +#include <asm/pgtable.h>
9504 +
9505 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9506
9507 /*
9508 * Copy To/From Userspace
9509 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9510 return ret;
9511 }
9512
9513 -__must_check unsigned long
9514 -_copy_to_user(void __user *to, const void *from, unsigned len);
9515 -__must_check unsigned long
9516 -_copy_from_user(void *to, const void __user *from, unsigned len);
9517 +static __always_inline __must_check unsigned long
9518 +__copy_to_user(void __user *to, const void *from, unsigned len);
9519 +static __always_inline __must_check unsigned long
9520 +__copy_from_user(void *to, const void __user *from, unsigned len);
9521 __must_check unsigned long
9522 copy_in_user(void __user *to, const void __user *from, unsigned len);
9523
9524 static inline unsigned long __must_check copy_from_user(void *to,
9525 const void __user *from,
9526 - unsigned long n)
9527 + unsigned n)
9528 {
9529 - int sz = __compiletime_object_size(to);
9530 -
9531 might_fault();
9532 - if (likely(sz == -1 || sz >= n))
9533 - n = _copy_from_user(to, from, n);
9534 -#ifdef CONFIG_DEBUG_VM
9535 - else
9536 - WARN(1, "Buffer overflow detected!\n");
9537 -#endif
9538 +
9539 + if (access_ok(VERIFY_READ, from, n))
9540 + n = __copy_from_user(to, from, n);
9541 + else if ((int)n > 0) {
9542 + if (!__builtin_constant_p(n))
9543 + check_object_size(to, n, false);
9544 + memset(to, 0, n);
9545 + }
9546 return n;
9547 }
9548
9549 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9550 {
9551 might_fault();
9552
9553 - return _copy_to_user(dst, src, size);
9554 + if (access_ok(VERIFY_WRITE, dst, size))
9555 + size = __copy_to_user(dst, src, size);
9556 + return size;
9557 }
9558
9559 static __always_inline __must_check
9560 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9561 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9562 {
9563 - int ret = 0;
9564 + int sz = __compiletime_object_size(dst);
9565 + unsigned ret = 0;
9566
9567 might_fault();
9568 - if (!__builtin_constant_p(size))
9569 - return copy_user_generic(dst, (__force void *)src, size);
9570 +
9571 + pax_track_stack();
9572 +
9573 + if ((int)size < 0)
9574 + return size;
9575 +
9576 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9577 + if (!__access_ok(VERIFY_READ, src, size))
9578 + return size;
9579 +#endif
9580 +
9581 + if (unlikely(sz != -1 && sz < size)) {
9582 +#ifdef CONFIG_DEBUG_VM
9583 + WARN(1, "Buffer overflow detected!\n");
9584 +#endif
9585 + return size;
9586 + }
9587 +
9588 + if (!__builtin_constant_p(size)) {
9589 + check_object_size(dst, size, false);
9590 +
9591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9592 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9593 + src += PAX_USER_SHADOW_BASE;
9594 +#endif
9595 +
9596 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9597 + }
9598 switch (size) {
9599 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9600 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9601 ret, "b", "b", "=q", 1);
9602 return ret;
9603 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9604 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9605 ret, "w", "w", "=r", 2);
9606 return ret;
9607 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9608 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9609 ret, "l", "k", "=r", 4);
9610 return ret;
9611 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9612 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9613 ret, "q", "", "=r", 8);
9614 return ret;
9615 case 10:
9616 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9617 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9618 ret, "q", "", "=r", 10);
9619 if (unlikely(ret))
9620 return ret;
9621 __get_user_asm(*(u16 *)(8 + (char *)dst),
9622 - (u16 __user *)(8 + (char __user *)src),
9623 + (const u16 __user *)(8 + (const char __user *)src),
9624 ret, "w", "w", "=r", 2);
9625 return ret;
9626 case 16:
9627 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9628 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9629 ret, "q", "", "=r", 16);
9630 if (unlikely(ret))
9631 return ret;
9632 __get_user_asm(*(u64 *)(8 + (char *)dst),
9633 - (u64 __user *)(8 + (char __user *)src),
9634 + (const u64 __user *)(8 + (const char __user *)src),
9635 ret, "q", "", "=r", 8);
9636 return ret;
9637 default:
9638 - return copy_user_generic(dst, (__force void *)src, size);
9639 +
9640 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9641 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9642 + src += PAX_USER_SHADOW_BASE;
9643 +#endif
9644 +
9645 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9646 }
9647 }
9648
9649 static __always_inline __must_check
9650 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9651 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9652 {
9653 - int ret = 0;
9654 + int sz = __compiletime_object_size(src);
9655 + unsigned ret = 0;
9656
9657 might_fault();
9658 - if (!__builtin_constant_p(size))
9659 - return copy_user_generic((__force void *)dst, src, size);
9660 +
9661 + pax_track_stack();
9662 +
9663 + if ((int)size < 0)
9664 + return size;
9665 +
9666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9667 + if (!__access_ok(VERIFY_WRITE, dst, size))
9668 + return size;
9669 +#endif
9670 +
9671 + if (unlikely(sz != -1 && sz < size)) {
9672 +#ifdef CONFIG_DEBUG_VM
9673 + WARN(1, "Buffer overflow detected!\n");
9674 +#endif
9675 + return size;
9676 + }
9677 +
9678 + if (!__builtin_constant_p(size)) {
9679 + check_object_size(src, size, true);
9680 +
9681 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9682 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9683 + dst += PAX_USER_SHADOW_BASE;
9684 +#endif
9685 +
9686 + return copy_user_generic((__force_kernel void *)dst, src, size);
9687 + }
9688 switch (size) {
9689 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9690 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9691 ret, "b", "b", "iq", 1);
9692 return ret;
9693 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9694 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9695 ret, "w", "w", "ir", 2);
9696 return ret;
9697 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9698 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9699 ret, "l", "k", "ir", 4);
9700 return ret;
9701 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9702 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9703 ret, "q", "", "er", 8);
9704 return ret;
9705 case 10:
9706 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9707 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9708 ret, "q", "", "er", 10);
9709 if (unlikely(ret))
9710 return ret;
9711 asm("":::"memory");
9712 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9713 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9714 ret, "w", "w", "ir", 2);
9715 return ret;
9716 case 16:
9717 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9718 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9719 ret, "q", "", "er", 16);
9720 if (unlikely(ret))
9721 return ret;
9722 asm("":::"memory");
9723 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9724 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9725 ret, "q", "", "er", 8);
9726 return ret;
9727 default:
9728 - return copy_user_generic((__force void *)dst, src, size);
9729 +
9730 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9731 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9732 + dst += PAX_USER_SHADOW_BASE;
9733 +#endif
9734 +
9735 + return copy_user_generic((__force_kernel void *)dst, src, size);
9736 }
9737 }
9738
9739 static __always_inline __must_check
9740 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9741 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9742 {
9743 - int ret = 0;
9744 + unsigned ret = 0;
9745
9746 might_fault();
9747 - if (!__builtin_constant_p(size))
9748 - return copy_user_generic((__force void *)dst,
9749 - (__force void *)src, size);
9750 +
9751 + if ((int)size < 0)
9752 + return size;
9753 +
9754 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9755 + if (!__access_ok(VERIFY_READ, src, size))
9756 + return size;
9757 + if (!__access_ok(VERIFY_WRITE, dst, size))
9758 + return size;
9759 +#endif
9760 +
9761 + if (!__builtin_constant_p(size)) {
9762 +
9763 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9764 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9765 + src += PAX_USER_SHADOW_BASE;
9766 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9767 + dst += PAX_USER_SHADOW_BASE;
9768 +#endif
9769 +
9770 + return copy_user_generic((__force_kernel void *)dst,
9771 + (__force_kernel const void *)src, size);
9772 + }
9773 switch (size) {
9774 case 1: {
9775 u8 tmp;
9776 - __get_user_asm(tmp, (u8 __user *)src,
9777 + __get_user_asm(tmp, (const u8 __user *)src,
9778 ret, "b", "b", "=q", 1);
9779 if (likely(!ret))
9780 __put_user_asm(tmp, (u8 __user *)dst,
9781 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9782 }
9783 case 2: {
9784 u16 tmp;
9785 - __get_user_asm(tmp, (u16 __user *)src,
9786 + __get_user_asm(tmp, (const u16 __user *)src,
9787 ret, "w", "w", "=r", 2);
9788 if (likely(!ret))
9789 __put_user_asm(tmp, (u16 __user *)dst,
9790 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9791
9792 case 4: {
9793 u32 tmp;
9794 - __get_user_asm(tmp, (u32 __user *)src,
9795 + __get_user_asm(tmp, (const u32 __user *)src,
9796 ret, "l", "k", "=r", 4);
9797 if (likely(!ret))
9798 __put_user_asm(tmp, (u32 __user *)dst,
9799 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9800 }
9801 case 8: {
9802 u64 tmp;
9803 - __get_user_asm(tmp, (u64 __user *)src,
9804 + __get_user_asm(tmp, (const u64 __user *)src,
9805 ret, "q", "", "=r", 8);
9806 if (likely(!ret))
9807 __put_user_asm(tmp, (u64 __user *)dst,
9808 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9809 return ret;
9810 }
9811 default:
9812 - return copy_user_generic((__force void *)dst,
9813 - (__force void *)src, size);
9814 +
9815 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9816 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9817 + src += PAX_USER_SHADOW_BASE;
9818 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9819 + dst += PAX_USER_SHADOW_BASE;
9820 +#endif
9821 +
9822 + return copy_user_generic((__force_kernel void *)dst,
9823 + (__force_kernel const void *)src, size);
9824 }
9825 }
9826
9827 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9828 static __must_check __always_inline int
9829 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9830 {
9831 - return copy_user_generic(dst, (__force const void *)src, size);
9832 + pax_track_stack();
9833 +
9834 + if ((int)size < 0)
9835 + return size;
9836 +
9837 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9838 + if (!__access_ok(VERIFY_READ, src, size))
9839 + return size;
9840 +
9841 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9842 + src += PAX_USER_SHADOW_BASE;
9843 +#endif
9844 +
9845 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9846 }
9847
9848 -static __must_check __always_inline int
9849 +static __must_check __always_inline unsigned long
9850 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9851 {
9852 - return copy_user_generic((__force void *)dst, src, size);
9853 + if ((int)size < 0)
9854 + return size;
9855 +
9856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9857 + if (!__access_ok(VERIFY_WRITE, dst, size))
9858 + return size;
9859 +
9860 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9861 + dst += PAX_USER_SHADOW_BASE;
9862 +#endif
9863 +
9864 + return copy_user_generic((__force_kernel void *)dst, src, size);
9865 }
9866
9867 -extern long __copy_user_nocache(void *dst, const void __user *src,
9868 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9869 unsigned size, int zerorest);
9870
9871 -static inline int
9872 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9873 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9874 {
9875 might_sleep();
9876 +
9877 + if ((int)size < 0)
9878 + return size;
9879 +
9880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9881 + if (!__access_ok(VERIFY_READ, src, size))
9882 + return size;
9883 +#endif
9884 +
9885 return __copy_user_nocache(dst, src, size, 1);
9886 }
9887
9888 -static inline int
9889 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9890 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9891 unsigned size)
9892 {
9893 + if ((int)size < 0)
9894 + return size;
9895 +
9896 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9897 + if (!__access_ok(VERIFY_READ, src, size))
9898 + return size;
9899 +#endif
9900 +
9901 return __copy_user_nocache(dst, src, size, 0);
9902 }
9903
9904 -unsigned long
9905 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9906 +extern unsigned long
9907 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9908
9909 #endif /* _ASM_X86_UACCESS_64_H */
9910 diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess.h linux-3.0.7/arch/x86/include/asm/uaccess.h
9911 --- linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9912 +++ linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9913 @@ -7,12 +7,15 @@
9914 #include <linux/compiler.h>
9915 #include <linux/thread_info.h>
9916 #include <linux/string.h>
9917 +#include <linux/sched.h>
9918 #include <asm/asm.h>
9919 #include <asm/page.h>
9920
9921 #define VERIFY_READ 0
9922 #define VERIFY_WRITE 1
9923
9924 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9925 +
9926 /*
9927 * The fs value determines whether argument validity checking should be
9928 * performed or not. If get_fs() == USER_DS, checking is performed, with
9929 @@ -28,7 +31,12 @@
9930
9931 #define get_ds() (KERNEL_DS)
9932 #define get_fs() (current_thread_info()->addr_limit)
9933 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9934 +void __set_fs(mm_segment_t x);
9935 +void set_fs(mm_segment_t x);
9936 +#else
9937 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9938 +#endif
9939
9940 #define segment_eq(a, b) ((a).seg == (b).seg)
9941
9942 @@ -76,7 +84,33 @@
9943 * checks that the pointer is in the user space range - after calling
9944 * this function, memory access functions may still return -EFAULT.
9945 */
9946 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9947 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9948 +#define access_ok(type, addr, size) \
9949 +({ \
9950 + long __size = size; \
9951 + unsigned long __addr = (unsigned long)addr; \
9952 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9953 + unsigned long __end_ao = __addr + __size - 1; \
9954 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9955 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9956 + while(__addr_ao <= __end_ao) { \
9957 + char __c_ao; \
9958 + __addr_ao += PAGE_SIZE; \
9959 + if (__size > PAGE_SIZE) \
9960 + cond_resched(); \
9961 + if (__get_user(__c_ao, (char __user *)__addr)) \
9962 + break; \
9963 + if (type != VERIFY_WRITE) { \
9964 + __addr = __addr_ao; \
9965 + continue; \
9966 + } \
9967 + if (__put_user(__c_ao, (char __user *)__addr)) \
9968 + break; \
9969 + __addr = __addr_ao; \
9970 + } \
9971 + } \
9972 + __ret_ao; \
9973 +})
9974
9975 /*
9976 * The exception table consists of pairs of addresses: the first is the
9977 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9978 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9979 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9980
9981 -
9982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9983 +#define __copyuser_seg "gs;"
9984 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9985 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9986 +#else
9987 +#define __copyuser_seg
9988 +#define __COPYUSER_SET_ES
9989 +#define __COPYUSER_RESTORE_ES
9990 +#endif
9991
9992 #ifdef CONFIG_X86_32
9993 #define __put_user_asm_u64(x, addr, err, errret) \
9994 - asm volatile("1: movl %%eax,0(%2)\n" \
9995 - "2: movl %%edx,4(%2)\n" \
9996 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9997 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9998 "3:\n" \
9999 ".section .fixup,\"ax\"\n" \
10000 "4: movl %3,%0\n" \
10001 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10002 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10003
10004 #define __put_user_asm_ex_u64(x, addr) \
10005 - asm volatile("1: movl %%eax,0(%1)\n" \
10006 - "2: movl %%edx,4(%1)\n" \
10007 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10008 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10009 "3:\n" \
10010 _ASM_EXTABLE(1b, 2b - 1b) \
10011 _ASM_EXTABLE(2b, 3b - 2b) \
10012 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10013 __typeof__(*(ptr)) __pu_val; \
10014 __chk_user_ptr(ptr); \
10015 might_fault(); \
10016 - __pu_val = x; \
10017 + __pu_val = (x); \
10018 switch (sizeof(*(ptr))) { \
10019 case 1: \
10020 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10021 @@ -373,7 +415,7 @@ do { \
10022 } while (0)
10023
10024 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10025 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10026 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10027 "2:\n" \
10028 ".section .fixup,\"ax\"\n" \
10029 "3: mov %3,%0\n" \
10030 @@ -381,7 +423,7 @@ do { \
10031 " jmp 2b\n" \
10032 ".previous\n" \
10033 _ASM_EXTABLE(1b, 3b) \
10034 - : "=r" (err), ltype(x) \
10035 + : "=r" (err), ltype (x) \
10036 : "m" (__m(addr)), "i" (errret), "0" (err))
10037
10038 #define __get_user_size_ex(x, ptr, size) \
10039 @@ -406,7 +448,7 @@ do { \
10040 } while (0)
10041
10042 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10043 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10044 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10045 "2:\n" \
10046 _ASM_EXTABLE(1b, 2b - 1b) \
10047 : ltype(x) : "m" (__m(addr)))
10048 @@ -423,13 +465,24 @@ do { \
10049 int __gu_err; \
10050 unsigned long __gu_val; \
10051 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10052 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10053 + (x) = (__typeof__(*(ptr)))__gu_val; \
10054 __gu_err; \
10055 })
10056
10057 /* FIXME: this hack is definitely wrong -AK */
10058 struct __large_struct { unsigned long buf[100]; };
10059 -#define __m(x) (*(struct __large_struct __user *)(x))
10060 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10061 +#define ____m(x) \
10062 +({ \
10063 + unsigned long ____x = (unsigned long)(x); \
10064 + if (____x < PAX_USER_SHADOW_BASE) \
10065 + ____x += PAX_USER_SHADOW_BASE; \
10066 + (void __user *)____x; \
10067 +})
10068 +#else
10069 +#define ____m(x) (x)
10070 +#endif
10071 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10072
10073 /*
10074 * Tell gcc we read from memory instead of writing: this is because
10075 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10076 * aliasing issues.
10077 */
10078 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10079 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10080 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10081 "2:\n" \
10082 ".section .fixup,\"ax\"\n" \
10083 "3: mov %3,%0\n" \
10084 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10085 ".previous\n" \
10086 _ASM_EXTABLE(1b, 3b) \
10087 : "=r"(err) \
10088 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10089 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10090
10091 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10092 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10093 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10094 "2:\n" \
10095 _ASM_EXTABLE(1b, 2b - 1b) \
10096 : : ltype(x), "m" (__m(addr)))
10097 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10098 * On error, the variable @x is set to zero.
10099 */
10100
10101 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10102 +#define __get_user(x, ptr) get_user((x), (ptr))
10103 +#else
10104 #define __get_user(x, ptr) \
10105 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10106 +#endif
10107
10108 /**
10109 * __put_user: - Write a simple value into user space, with less checking.
10110 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10111 * Returns zero on success, or -EFAULT on error.
10112 */
10113
10114 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10115 +#define __put_user(x, ptr) put_user((x), (ptr))
10116 +#else
10117 #define __put_user(x, ptr) \
10118 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10119 +#endif
10120
10121 #define __get_user_unaligned __get_user
10122 #define __put_user_unaligned __put_user
10123 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10124 #define get_user_ex(x, ptr) do { \
10125 unsigned long __gue_val; \
10126 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10127 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10128 + (x) = (__typeof__(*(ptr)))__gue_val; \
10129 } while (0)
10130
10131 #ifdef CONFIG_X86_WP_WORKS_OK
10132 diff -urNp linux-3.0.7/arch/x86/include/asm/vdso.h linux-3.0.7/arch/x86/include/asm/vdso.h
10133 --- linux-3.0.7/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10134 +++ linux-3.0.7/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10135 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10136 #define VDSO32_SYMBOL(base, name) \
10137 ({ \
10138 extern const char VDSO32_##name[]; \
10139 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10140 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10141 })
10142 #endif
10143
10144 diff -urNp linux-3.0.7/arch/x86/include/asm/x86_init.h linux-3.0.7/arch/x86/include/asm/x86_init.h
10145 --- linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10146 +++ linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10147 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
10148 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10149 void (*find_smp_config)(void);
10150 void (*get_smp_config)(unsigned int early);
10151 -};
10152 +} __no_const;
10153
10154 /**
10155 * struct x86_init_resources - platform specific resource related ops
10156 @@ -42,7 +42,7 @@ struct x86_init_resources {
10157 void (*probe_roms)(void);
10158 void (*reserve_resources)(void);
10159 char *(*memory_setup)(void);
10160 -};
10161 +} __no_const;
10162
10163 /**
10164 * struct x86_init_irqs - platform specific interrupt setup
10165 @@ -55,7 +55,7 @@ struct x86_init_irqs {
10166 void (*pre_vector_init)(void);
10167 void (*intr_init)(void);
10168 void (*trap_init)(void);
10169 -};
10170 +} __no_const;
10171
10172 /**
10173 * struct x86_init_oem - oem platform specific customizing functions
10174 @@ -65,7 +65,7 @@ struct x86_init_irqs {
10175 struct x86_init_oem {
10176 void (*arch_setup)(void);
10177 void (*banner)(void);
10178 -};
10179 +} __no_const;
10180
10181 /**
10182 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10183 @@ -76,7 +76,7 @@ struct x86_init_oem {
10184 */
10185 struct x86_init_mapping {
10186 void (*pagetable_reserve)(u64 start, u64 end);
10187 -};
10188 +} __no_const;
10189
10190 /**
10191 * struct x86_init_paging - platform specific paging functions
10192 @@ -86,7 +86,7 @@ struct x86_init_mapping {
10193 struct x86_init_paging {
10194 void (*pagetable_setup_start)(pgd_t *base);
10195 void (*pagetable_setup_done)(pgd_t *base);
10196 -};
10197 +} __no_const;
10198
10199 /**
10200 * struct x86_init_timers - platform specific timer setup
10201 @@ -101,7 +101,7 @@ struct x86_init_timers {
10202 void (*tsc_pre_init)(void);
10203 void (*timer_init)(void);
10204 void (*wallclock_init)(void);
10205 -};
10206 +} __no_const;
10207
10208 /**
10209 * struct x86_init_iommu - platform specific iommu setup
10210 @@ -109,7 +109,7 @@ struct x86_init_timers {
10211 */
10212 struct x86_init_iommu {
10213 int (*iommu_init)(void);
10214 -};
10215 +} __no_const;
10216
10217 /**
10218 * struct x86_init_pci - platform specific pci init functions
10219 @@ -123,7 +123,7 @@ struct x86_init_pci {
10220 int (*init)(void);
10221 void (*init_irq)(void);
10222 void (*fixup_irqs)(void);
10223 -};
10224 +} __no_const;
10225
10226 /**
10227 * struct x86_init_ops - functions for platform specific setup
10228 @@ -139,7 +139,7 @@ struct x86_init_ops {
10229 struct x86_init_timers timers;
10230 struct x86_init_iommu iommu;
10231 struct x86_init_pci pci;
10232 -};
10233 +} __no_const;
10234
10235 /**
10236 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10237 @@ -147,7 +147,7 @@ struct x86_init_ops {
10238 */
10239 struct x86_cpuinit_ops {
10240 void (*setup_percpu_clockev)(void);
10241 -};
10242 +} __no_const;
10243
10244 /**
10245 * struct x86_platform_ops - platform specific runtime functions
10246 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10247 bool (*is_untracked_pat_range)(u64 start, u64 end);
10248 void (*nmi_init)(void);
10249 int (*i8042_detect)(void);
10250 -};
10251 +} __no_const;
10252
10253 struct pci_dev;
10254
10255 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10256 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10257 void (*teardown_msi_irq)(unsigned int irq);
10258 void (*teardown_msi_irqs)(struct pci_dev *dev);
10259 -};
10260 +} __no_const;
10261
10262 extern struct x86_init_ops x86_init;
10263 extern struct x86_cpuinit_ops x86_cpuinit;
10264 diff -urNp linux-3.0.7/arch/x86/include/asm/xsave.h linux-3.0.7/arch/x86/include/asm/xsave.h
10265 --- linux-3.0.7/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10266 +++ linux-3.0.7/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10267 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10268 {
10269 int err;
10270
10271 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10272 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10273 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10274 +#endif
10275 +
10276 /*
10277 * Clear the xsave header first, so that reserved fields are
10278 * initialized to zero.
10279 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10280 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10281 {
10282 int err;
10283 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10284 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10285 u32 lmask = mask;
10286 u32 hmask = mask >> 32;
10287
10288 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10289 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10290 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10291 +#endif
10292 +
10293 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10294 "2:\n"
10295 ".section .fixup,\"ax\"\n"
10296 diff -urNp linux-3.0.7/arch/x86/Kconfig linux-3.0.7/arch/x86/Kconfig
10297 --- linux-3.0.7/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10298 +++ linux-3.0.7/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10299 @@ -229,7 +229,7 @@ config X86_HT
10300
10301 config X86_32_LAZY_GS
10302 def_bool y
10303 - depends on X86_32 && !CC_STACKPROTECTOR
10304 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10305
10306 config ARCH_HWEIGHT_CFLAGS
10307 string
10308 @@ -1018,7 +1018,7 @@ choice
10309
10310 config NOHIGHMEM
10311 bool "off"
10312 - depends on !X86_NUMAQ
10313 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10314 ---help---
10315 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10316 However, the address space of 32-bit x86 processors is only 4
10317 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10318
10319 config HIGHMEM4G
10320 bool "4GB"
10321 - depends on !X86_NUMAQ
10322 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10323 ---help---
10324 Select this if you have a 32-bit processor and between 1 and 4
10325 gigabytes of physical RAM.
10326 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10327 hex
10328 default 0xB0000000 if VMSPLIT_3G_OPT
10329 default 0x80000000 if VMSPLIT_2G
10330 - default 0x78000000 if VMSPLIT_2G_OPT
10331 + default 0x70000000 if VMSPLIT_2G_OPT
10332 default 0x40000000 if VMSPLIT_1G
10333 default 0xC0000000
10334 depends on X86_32
10335 @@ -1483,6 +1483,7 @@ config SECCOMP
10336
10337 config CC_STACKPROTECTOR
10338 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10339 + depends on X86_64 || !PAX_MEMORY_UDEREF
10340 ---help---
10341 This option turns on the -fstack-protector GCC feature. This
10342 feature puts, at the beginning of functions, a canary value on
10343 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10344 config PHYSICAL_START
10345 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10346 default "0x1000000"
10347 + range 0x400000 0x40000000
10348 ---help---
10349 This gives the physical address where the kernel is loaded.
10350
10351 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10352 config PHYSICAL_ALIGN
10353 hex "Alignment value to which kernel should be aligned" if X86_32
10354 default "0x1000000"
10355 + range 0x400000 0x1000000 if PAX_KERNEXEC
10356 range 0x2000 0x1000000
10357 ---help---
10358 This value puts the alignment restrictions on physical address
10359 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10360 Say N if you want to disable CPU hotplug.
10361
10362 config COMPAT_VDSO
10363 - def_bool y
10364 + def_bool n
10365 prompt "Compat VDSO support"
10366 depends on X86_32 || IA32_EMULATION
10367 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10368 ---help---
10369 Map the 32-bit VDSO to the predictable old-style address too.
10370
10371 diff -urNp linux-3.0.7/arch/x86/Kconfig.cpu linux-3.0.7/arch/x86/Kconfig.cpu
10372 --- linux-3.0.7/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10373 +++ linux-3.0.7/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10374 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10375
10376 config X86_F00F_BUG
10377 def_bool y
10378 - depends on M586MMX || M586TSC || M586 || M486 || M386
10379 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10380
10381 config X86_INVD_BUG
10382 def_bool y
10383 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10384
10385 config X86_ALIGNMENT_16
10386 def_bool y
10387 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10388 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10389
10390 config X86_INTEL_USERCOPY
10391 def_bool y
10392 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10393 # generates cmov.
10394 config X86_CMOV
10395 def_bool y
10396 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10397 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10398
10399 config X86_MINIMUM_CPU_FAMILY
10400 int
10401 diff -urNp linux-3.0.7/arch/x86/Kconfig.debug linux-3.0.7/arch/x86/Kconfig.debug
10402 --- linux-3.0.7/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10403 +++ linux-3.0.7/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10404 @@ -81,7 +81,7 @@ config X86_PTDUMP
10405 config DEBUG_RODATA
10406 bool "Write protect kernel read-only data structures"
10407 default y
10408 - depends on DEBUG_KERNEL
10409 + depends on DEBUG_KERNEL && BROKEN
10410 ---help---
10411 Mark the kernel read-only data as write-protected in the pagetables,
10412 in order to catch accidental (and incorrect) writes to such const
10413 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10414
10415 config DEBUG_SET_MODULE_RONX
10416 bool "Set loadable kernel module data as NX and text as RO"
10417 - depends on MODULES
10418 + depends on MODULES && BROKEN
10419 ---help---
10420 This option helps catch unintended modifications to loadable
10421 kernel module's text and read-only data. It also prevents execution
10422 diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile
10423 --- linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10424 +++ linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10425 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10426 $(call cc-option, -fno-stack-protector) \
10427 $(call cc-option, -mpreferred-stack-boundary=2)
10428 KBUILD_CFLAGS += $(call cc-option, -m32)
10429 +ifdef CONSTIFY_PLUGIN
10430 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10431 +endif
10432 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10433 GCOV_PROFILE := n
10434
10435 diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S
10436 --- linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10437 +++ linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10438 @@ -108,6 +108,9 @@ wakeup_code:
10439 /* Do any other stuff... */
10440
10441 #ifndef CONFIG_64BIT
10442 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10443 + call verify_cpu
10444 +
10445 /* This could also be done in C code... */
10446 movl pmode_cr3, %eax
10447 movl %eax, %cr3
10448 @@ -131,6 +134,7 @@ wakeup_code:
10449 movl pmode_cr0, %eax
10450 movl %eax, %cr0
10451 jmp pmode_return
10452 +# include "../../verify_cpu.S"
10453 #else
10454 pushw $0
10455 pushw trampoline_segment
10456 diff -urNp linux-3.0.7/arch/x86/kernel/acpi/sleep.c linux-3.0.7/arch/x86/kernel/acpi/sleep.c
10457 --- linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10458 +++ linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10459 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10460 header->trampoline_segment = trampoline_address() >> 4;
10461 #ifdef CONFIG_SMP
10462 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10463 +
10464 + pax_open_kernel();
10465 early_gdt_descr.address =
10466 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10467 + pax_close_kernel();
10468 +
10469 initial_gs = per_cpu_offset(smp_processor_id());
10470 #endif
10471 initial_code = (unsigned long)wakeup_long64;
10472 diff -urNp linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S
10473 --- linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10474 +++ linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10475 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10476 # and restore the stack ... but you need gdt for this to work
10477 movl saved_context_esp, %esp
10478
10479 - movl %cs:saved_magic, %eax
10480 - cmpl $0x12345678, %eax
10481 + cmpl $0x12345678, saved_magic
10482 jne bogus_magic
10483
10484 # jump to place where we left off
10485 - movl saved_eip, %eax
10486 - jmp *%eax
10487 + jmp *(saved_eip)
10488
10489 bogus_magic:
10490 jmp bogus_magic
10491 diff -urNp linux-3.0.7/arch/x86/kernel/alternative.c linux-3.0.7/arch/x86/kernel/alternative.c
10492 --- linux-3.0.7/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10493 +++ linux-3.0.7/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10494 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10495 if (!*poff || ptr < text || ptr >= text_end)
10496 continue;
10497 /* turn DS segment override prefix into lock prefix */
10498 - if (*ptr == 0x3e)
10499 + if (*ktla_ktva(ptr) == 0x3e)
10500 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10501 };
10502 mutex_unlock(&text_mutex);
10503 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10504 if (!*poff || ptr < text || ptr >= text_end)
10505 continue;
10506 /* turn lock prefix into DS segment override prefix */
10507 - if (*ptr == 0xf0)
10508 + if (*ktla_ktva(ptr) == 0xf0)
10509 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10510 };
10511 mutex_unlock(&text_mutex);
10512 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10513
10514 BUG_ON(p->len > MAX_PATCH_LEN);
10515 /* prep the buffer with the original instructions */
10516 - memcpy(insnbuf, p->instr, p->len);
10517 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10518 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10519 (unsigned long)p->instr, p->len);
10520
10521 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10522 if (smp_alt_once)
10523 free_init_pages("SMP alternatives",
10524 (unsigned long)__smp_locks,
10525 - (unsigned long)__smp_locks_end);
10526 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10527
10528 restart_nmi();
10529 }
10530 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10531 * instructions. And on the local CPU you need to be protected again NMI or MCE
10532 * handlers seeing an inconsistent instruction while you patch.
10533 */
10534 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10535 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10536 size_t len)
10537 {
10538 unsigned long flags;
10539 local_irq_save(flags);
10540 - memcpy(addr, opcode, len);
10541 +
10542 + pax_open_kernel();
10543 + memcpy(ktla_ktva(addr), opcode, len);
10544 sync_core();
10545 + pax_close_kernel();
10546 +
10547 local_irq_restore(flags);
10548 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10549 that causes hangs on some VIA CPUs. */
10550 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10551 */
10552 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10553 {
10554 - unsigned long flags;
10555 - char *vaddr;
10556 + unsigned char *vaddr = ktla_ktva(addr);
10557 struct page *pages[2];
10558 - int i;
10559 + size_t i;
10560
10561 if (!core_kernel_text((unsigned long)addr)) {
10562 - pages[0] = vmalloc_to_page(addr);
10563 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10564 + pages[0] = vmalloc_to_page(vaddr);
10565 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10566 } else {
10567 - pages[0] = virt_to_page(addr);
10568 + pages[0] = virt_to_page(vaddr);
10569 WARN_ON(!PageReserved(pages[0]));
10570 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10571 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10572 }
10573 BUG_ON(!pages[0]);
10574 - local_irq_save(flags);
10575 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10576 - if (pages[1])
10577 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10578 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10579 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10580 - clear_fixmap(FIX_TEXT_POKE0);
10581 - if (pages[1])
10582 - clear_fixmap(FIX_TEXT_POKE1);
10583 - local_flush_tlb();
10584 - sync_core();
10585 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10586 - that causes hangs on some VIA CPUs. */
10587 + text_poke_early(addr, opcode, len);
10588 for (i = 0; i < len; i++)
10589 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10590 - local_irq_restore(flags);
10591 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10592 return addr;
10593 }
10594
10595 diff -urNp linux-3.0.7/arch/x86/kernel/apic/apic.c linux-3.0.7/arch/x86/kernel/apic/apic.c
10596 --- linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10597 +++ linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10598 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10599 /*
10600 * Debug level, exported for io_apic.c
10601 */
10602 -unsigned int apic_verbosity;
10603 +int apic_verbosity;
10604
10605 int pic_mode;
10606
10607 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10608 apic_write(APIC_ESR, 0);
10609 v1 = apic_read(APIC_ESR);
10610 ack_APIC_irq();
10611 - atomic_inc(&irq_err_count);
10612 + atomic_inc_unchecked(&irq_err_count);
10613
10614 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10615 smp_processor_id(), v0 , v1);
10616 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10617 u16 *bios_cpu_apicid;
10618 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10619
10620 + pax_track_stack();
10621 +
10622 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10623 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10624
10625 diff -urNp linux-3.0.7/arch/x86/kernel/apic/io_apic.c linux-3.0.7/arch/x86/kernel/apic/io_apic.c
10626 --- linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10627 +++ linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10628 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10629 }
10630 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10631
10632 -void lock_vector_lock(void)
10633 +void lock_vector_lock(void) __acquires(vector_lock)
10634 {
10635 /* Used to the online set of cpus does not change
10636 * during assign_irq_vector.
10637 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10638 raw_spin_lock(&vector_lock);
10639 }
10640
10641 -void unlock_vector_lock(void)
10642 +void unlock_vector_lock(void) __releases(vector_lock)
10643 {
10644 raw_spin_unlock(&vector_lock);
10645 }
10646 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10647 ack_APIC_irq();
10648 }
10649
10650 -atomic_t irq_mis_count;
10651 +atomic_unchecked_t irq_mis_count;
10652
10653 /*
10654 * IO-APIC versions below 0x20 don't support EOI register.
10655 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10656 * at the cpu.
10657 */
10658 if (!(v & (1 << (i & 0x1f)))) {
10659 - atomic_inc(&irq_mis_count);
10660 + atomic_inc_unchecked(&irq_mis_count);
10661
10662 eoi_ioapic_irq(irq, cfg);
10663 }
10664 diff -urNp linux-3.0.7/arch/x86/kernel/apm_32.c linux-3.0.7/arch/x86/kernel/apm_32.c
10665 --- linux-3.0.7/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10666 +++ linux-3.0.7/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10667 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10668 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10669 * even though they are called in protected mode.
10670 */
10671 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10672 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10673 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10674
10675 static const char driver_version[] = "1.16ac"; /* no spaces */
10676 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10677 BUG_ON(cpu != 0);
10678 gdt = get_cpu_gdt_table(cpu);
10679 save_desc_40 = gdt[0x40 / 8];
10680 +
10681 + pax_open_kernel();
10682 gdt[0x40 / 8] = bad_bios_desc;
10683 + pax_close_kernel();
10684
10685 apm_irq_save(flags);
10686 APM_DO_SAVE_SEGS;
10687 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10688 &call->esi);
10689 APM_DO_RESTORE_SEGS;
10690 apm_irq_restore(flags);
10691 +
10692 + pax_open_kernel();
10693 gdt[0x40 / 8] = save_desc_40;
10694 + pax_close_kernel();
10695 +
10696 put_cpu();
10697
10698 return call->eax & 0xff;
10699 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10700 BUG_ON(cpu != 0);
10701 gdt = get_cpu_gdt_table(cpu);
10702 save_desc_40 = gdt[0x40 / 8];
10703 +
10704 + pax_open_kernel();
10705 gdt[0x40 / 8] = bad_bios_desc;
10706 + pax_close_kernel();
10707
10708 apm_irq_save(flags);
10709 APM_DO_SAVE_SEGS;
10710 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10711 &call->eax);
10712 APM_DO_RESTORE_SEGS;
10713 apm_irq_restore(flags);
10714 +
10715 + pax_open_kernel();
10716 gdt[0x40 / 8] = save_desc_40;
10717 + pax_close_kernel();
10718 +
10719 put_cpu();
10720 return error;
10721 }
10722 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10723 * code to that CPU.
10724 */
10725 gdt = get_cpu_gdt_table(0);
10726 +
10727 + pax_open_kernel();
10728 set_desc_base(&gdt[APM_CS >> 3],
10729 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10730 set_desc_base(&gdt[APM_CS_16 >> 3],
10731 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10732 set_desc_base(&gdt[APM_DS >> 3],
10733 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10734 + pax_close_kernel();
10735
10736 proc_create("apm", 0, NULL, &apm_file_ops);
10737
10738 diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets_64.c linux-3.0.7/arch/x86/kernel/asm-offsets_64.c
10739 --- linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10740 +++ linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10741 @@ -69,6 +69,7 @@ int main(void)
10742 BLANK();
10743 #undef ENTRY
10744
10745 + DEFINE(TSS_size, sizeof(struct tss_struct));
10746 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10747 BLANK();
10748
10749 diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets.c linux-3.0.7/arch/x86/kernel/asm-offsets.c
10750 --- linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10751 +++ linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10752 @@ -33,6 +33,8 @@ void common(void) {
10753 OFFSET(TI_status, thread_info, status);
10754 OFFSET(TI_addr_limit, thread_info, addr_limit);
10755 OFFSET(TI_preempt_count, thread_info, preempt_count);
10756 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10757 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10758
10759 BLANK();
10760 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10761 @@ -53,8 +55,26 @@ void common(void) {
10762 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10763 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10764 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10765 +
10766 +#ifdef CONFIG_PAX_KERNEXEC
10767 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10768 +#endif
10769 +
10770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10771 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10772 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10773 +#ifdef CONFIG_X86_64
10774 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10775 +#endif
10776 #endif
10777
10778 +#endif
10779 +
10780 + BLANK();
10781 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10782 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10783 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10784 +
10785 #ifdef CONFIG_XEN
10786 BLANK();
10787 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10788 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/amd.c linux-3.0.7/arch/x86/kernel/cpu/amd.c
10789 --- linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10790 +++ linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10791 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10792 unsigned int size)
10793 {
10794 /* AMD errata T13 (order #21922) */
10795 - if ((c->x86 == 6)) {
10796 + if (c->x86 == 6) {
10797 /* Duron Rev A0 */
10798 if (c->x86_model == 3 && c->x86_mask == 0)
10799 size = 64;
10800 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/common.c linux-3.0.7/arch/x86/kernel/cpu/common.c
10801 --- linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10802 +++ linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10803 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10804
10805 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10806
10807 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10808 -#ifdef CONFIG_X86_64
10809 - /*
10810 - * We need valid kernel segments for data and code in long mode too
10811 - * IRET will check the segment types kkeil 2000/10/28
10812 - * Also sysret mandates a special GDT layout
10813 - *
10814 - * TLS descriptors are currently at a different place compared to i386.
10815 - * Hopefully nobody expects them at a fixed place (Wine?)
10816 - */
10817 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10818 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10819 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10820 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10821 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10822 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10823 -#else
10824 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10825 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10826 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10827 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10828 - /*
10829 - * Segments used for calling PnP BIOS have byte granularity.
10830 - * They code segments and data segments have fixed 64k limits,
10831 - * the transfer segment sizes are set at run time.
10832 - */
10833 - /* 32-bit code */
10834 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10835 - /* 16-bit code */
10836 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10837 - /* 16-bit data */
10838 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10839 - /* 16-bit data */
10840 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10841 - /* 16-bit data */
10842 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10843 - /*
10844 - * The APM segments have byte granularity and their bases
10845 - * are set at run time. All have 64k limits.
10846 - */
10847 - /* 32-bit code */
10848 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10849 - /* 16-bit code */
10850 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10851 - /* data */
10852 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10853 -
10854 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10855 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10856 - GDT_STACK_CANARY_INIT
10857 -#endif
10858 -} };
10859 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10860 -
10861 static int __init x86_xsave_setup(char *s)
10862 {
10863 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10864 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10865 {
10866 struct desc_ptr gdt_descr;
10867
10868 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10869 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10870 gdt_descr.size = GDT_SIZE - 1;
10871 load_gdt(&gdt_descr);
10872 /* Reload the per-cpu base */
10873 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10874 /* Filter out anything that depends on CPUID levels we don't have */
10875 filter_cpuid_features(c, true);
10876
10877 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10878 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10879 +#endif
10880 +
10881 /* If the model name is still unset, do table lookup. */
10882 if (!c->x86_model_id[0]) {
10883 const char *p;
10884 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10885 }
10886 __setup("clearcpuid=", setup_disablecpuid);
10887
10888 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10889 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10890 +
10891 #ifdef CONFIG_X86_64
10892 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10893
10894 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10895 EXPORT_PER_CPU_SYMBOL(current_task);
10896
10897 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10898 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10899 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10900 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10901
10902 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10903 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10904 {
10905 memset(regs, 0, sizeof(struct pt_regs));
10906 regs->fs = __KERNEL_PERCPU;
10907 - regs->gs = __KERNEL_STACK_CANARY;
10908 + savesegment(gs, regs->gs);
10909
10910 return regs;
10911 }
10912 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10913 int i;
10914
10915 cpu = stack_smp_processor_id();
10916 - t = &per_cpu(init_tss, cpu);
10917 + t = init_tss + cpu;
10918 oist = &per_cpu(orig_ist, cpu);
10919
10920 #ifdef CONFIG_NUMA
10921 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10922 switch_to_new_gdt(cpu);
10923 loadsegment(fs, 0);
10924
10925 - load_idt((const struct desc_ptr *)&idt_descr);
10926 + load_idt(&idt_descr);
10927
10928 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10929 syscall_init();
10930 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10931 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10932 barrier();
10933
10934 - x86_configure_nx();
10935 if (cpu != 0)
10936 enable_x2apic();
10937
10938 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10939 {
10940 int cpu = smp_processor_id();
10941 struct task_struct *curr = current;
10942 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10943 + struct tss_struct *t = init_tss + cpu;
10944 struct thread_struct *thread = &curr->thread;
10945
10946 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10947 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/intel.c linux-3.0.7/arch/x86/kernel/cpu/intel.c
10948 --- linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10949 +++ linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10950 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10951 * Update the IDT descriptor and reload the IDT so that
10952 * it uses the read-only mapped virtual address.
10953 */
10954 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10955 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10956 load_idt(&idt_descr);
10957 }
10958 #endif
10959 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/Makefile linux-3.0.7/arch/x86/kernel/cpu/Makefile
10960 --- linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10961 +++ linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10962 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10963 CFLAGS_REMOVE_perf_event.o = -pg
10964 endif
10965
10966 -# Make sure load_percpu_segment has no stackprotector
10967 -nostackp := $(call cc-option, -fno-stack-protector)
10968 -CFLAGS_common.o := $(nostackp)
10969 -
10970 obj-y := intel_cacheinfo.o scattered.o topology.o
10971 obj-y += proc.o capflags.o powerflags.o common.o
10972 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10973 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c
10974 --- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10975 +++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10976 @@ -46,6 +46,7 @@
10977 #include <asm/ipi.h>
10978 #include <asm/mce.h>
10979 #include <asm/msr.h>
10980 +#include <asm/local.h>
10981
10982 #include "mce-internal.h"
10983
10984 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10985 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10986 m->cs, m->ip);
10987
10988 - if (m->cs == __KERNEL_CS)
10989 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10990 print_symbol("{%s}", m->ip);
10991 pr_cont("\n");
10992 }
10993 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10994
10995 #define PANIC_TIMEOUT 5 /* 5 seconds */
10996
10997 -static atomic_t mce_paniced;
10998 +static atomic_unchecked_t mce_paniced;
10999
11000 static int fake_panic;
11001 -static atomic_t mce_fake_paniced;
11002 +static atomic_unchecked_t mce_fake_paniced;
11003
11004 /* Panic in progress. Enable interrupts and wait for final IPI */
11005 static void wait_for_panic(void)
11006 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11007 /*
11008 * Make sure only one CPU runs in machine check panic
11009 */
11010 - if (atomic_inc_return(&mce_paniced) > 1)
11011 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11012 wait_for_panic();
11013 barrier();
11014
11015 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11016 console_verbose();
11017 } else {
11018 /* Don't log too much for fake panic */
11019 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11020 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11021 return;
11022 }
11023 /* First print corrected ones that are still unlogged */
11024 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11025 * might have been modified by someone else.
11026 */
11027 rmb();
11028 - if (atomic_read(&mce_paniced))
11029 + if (atomic_read_unchecked(&mce_paniced))
11030 wait_for_panic();
11031 if (!monarch_timeout)
11032 goto out;
11033 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11034 */
11035
11036 static DEFINE_SPINLOCK(mce_state_lock);
11037 -static int open_count; /* #times opened */
11038 +static local_t open_count; /* #times opened */
11039 static int open_exclu; /* already open exclusive? */
11040
11041 static int mce_open(struct inode *inode, struct file *file)
11042 {
11043 spin_lock(&mce_state_lock);
11044
11045 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11046 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11047 spin_unlock(&mce_state_lock);
11048
11049 return -EBUSY;
11050 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11051
11052 if (file->f_flags & O_EXCL)
11053 open_exclu = 1;
11054 - open_count++;
11055 + local_inc(&open_count);
11056
11057 spin_unlock(&mce_state_lock);
11058
11059 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11060 {
11061 spin_lock(&mce_state_lock);
11062
11063 - open_count--;
11064 + local_dec(&open_count);
11065 open_exclu = 0;
11066
11067 spin_unlock(&mce_state_lock);
11068 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11069 static void mce_reset(void)
11070 {
11071 cpu_missing = 0;
11072 - atomic_set(&mce_fake_paniced, 0);
11073 + atomic_set_unchecked(&mce_fake_paniced, 0);
11074 atomic_set(&mce_executing, 0);
11075 atomic_set(&mce_callin, 0);
11076 atomic_set(&global_nwo, 0);
11077 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c
11078 --- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11079 +++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11080 @@ -215,7 +215,9 @@ static int inject_init(void)
11081 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11082 return -ENOMEM;
11083 printk(KERN_INFO "Machine check injector initialized\n");
11084 - mce_chrdev_ops.write = mce_write;
11085 + pax_open_kernel();
11086 + *(void **)&mce_chrdev_ops.write = mce_write;
11087 + pax_close_kernel();
11088 register_die_notifier(&mce_raise_nb);
11089 return 0;
11090 }
11091 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c
11092 --- linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11093 +++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11094 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11095 u64 size_or_mask, size_and_mask;
11096 static bool mtrr_aps_delayed_init;
11097
11098 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11099 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11100
11101 const struct mtrr_ops *mtrr_if;
11102
11103 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h
11104 --- linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11105 +++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11106 @@ -25,7 +25,7 @@ struct mtrr_ops {
11107 int (*validate_add_page)(unsigned long base, unsigned long size,
11108 unsigned int type);
11109 int (*have_wrcomb)(void);
11110 -};
11111 +} __do_const;
11112
11113 extern int generic_get_free_region(unsigned long base, unsigned long size,
11114 int replace_reg);
11115 diff -urNp linux-3.0.7/arch/x86/kernel/cpu/perf_event.c linux-3.0.7/arch/x86/kernel/cpu/perf_event.c
11116 --- linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:54:53.000000000 -0400
11117 +++ linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:55:27.000000000 -0400
11118 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11119 int i, j, w, wmax, num = 0;
11120 struct hw_perf_event *hwc;
11121
11122 + pax_track_stack();
11123 +
11124 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11125
11126 for (i = 0; i < n; i++) {
11127 @@ -1875,7 +1877,7 @@ perf_callchain_user(struct perf_callchai
11128 break;
11129
11130 perf_callchain_store(entry, frame.return_address);
11131 - fp = frame.next_frame;
11132 + fp = (const void __force_user *)frame.next_frame;
11133 }
11134 }
11135
11136 diff -urNp linux-3.0.7/arch/x86/kernel/crash.c linux-3.0.7/arch/x86/kernel/crash.c
11137 --- linux-3.0.7/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11138 +++ linux-3.0.7/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11139 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11140 regs = args->regs;
11141
11142 #ifdef CONFIG_X86_32
11143 - if (!user_mode_vm(regs)) {
11144 + if (!user_mode(regs)) {
11145 crash_fixup_ss_esp(&fixed_regs, regs);
11146 regs = &fixed_regs;
11147 }
11148 diff -urNp linux-3.0.7/arch/x86/kernel/doublefault_32.c linux-3.0.7/arch/x86/kernel/doublefault_32.c
11149 --- linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11150 +++ linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11151 @@ -11,7 +11,7 @@
11152
11153 #define DOUBLEFAULT_STACKSIZE (1024)
11154 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11155 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11156 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11157
11158 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11159
11160 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
11161 unsigned long gdt, tss;
11162
11163 store_gdt(&gdt_desc);
11164 - gdt = gdt_desc.address;
11165 + gdt = (unsigned long)gdt_desc.address;
11166
11167 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11168
11169 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11170 /* 0x2 bit is always set */
11171 .flags = X86_EFLAGS_SF | 0x2,
11172 .sp = STACK_START,
11173 - .es = __USER_DS,
11174 + .es = __KERNEL_DS,
11175 .cs = __KERNEL_CS,
11176 .ss = __KERNEL_DS,
11177 - .ds = __USER_DS,
11178 + .ds = __KERNEL_DS,
11179 .fs = __KERNEL_PERCPU,
11180
11181 .__cr3 = __pa_nodebug(swapper_pg_dir),
11182 diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_32.c linux-3.0.7/arch/x86/kernel/dumpstack_32.c
11183 --- linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11184 +++ linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11185 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11186 bp = stack_frame(task, regs);
11187
11188 for (;;) {
11189 - struct thread_info *context;
11190 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11191
11192 - context = (struct thread_info *)
11193 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11194 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11195 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11196
11197 - stack = (unsigned long *)context->previous_esp;
11198 - if (!stack)
11199 + if (stack_start == task_stack_page(task))
11200 break;
11201 + stack = *(unsigned long **)stack_start;
11202 if (ops->stack(data, "IRQ") < 0)
11203 break;
11204 touch_nmi_watchdog();
11205 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11206 * When in-kernel, we also print out the stack and code at the
11207 * time of the fault..
11208 */
11209 - if (!user_mode_vm(regs)) {
11210 + if (!user_mode(regs)) {
11211 unsigned int code_prologue = code_bytes * 43 / 64;
11212 unsigned int code_len = code_bytes;
11213 unsigned char c;
11214 u8 *ip;
11215 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11216
11217 printk(KERN_EMERG "Stack:\n");
11218 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11219
11220 printk(KERN_EMERG "Code: ");
11221
11222 - ip = (u8 *)regs->ip - code_prologue;
11223 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11224 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11225 /* try starting at IP */
11226 - ip = (u8 *)regs->ip;
11227 + ip = (u8 *)regs->ip + cs_base;
11228 code_len = code_len - code_prologue + 1;
11229 }
11230 for (i = 0; i < code_len; i++, ip++) {
11231 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11232 printk(" Bad EIP value.");
11233 break;
11234 }
11235 - if (ip == (u8 *)regs->ip)
11236 + if (ip == (u8 *)regs->ip + cs_base)
11237 printk("<%02x> ", c);
11238 else
11239 printk("%02x ", c);
11240 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11241 {
11242 unsigned short ud2;
11243
11244 + ip = ktla_ktva(ip);
11245 if (ip < PAGE_OFFSET)
11246 return 0;
11247 if (probe_kernel_address((unsigned short *)ip, ud2))
11248 diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_64.c linux-3.0.7/arch/x86/kernel/dumpstack_64.c
11249 --- linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11250 +++ linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11251 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11252 unsigned long *irq_stack_end =
11253 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11254 unsigned used = 0;
11255 - struct thread_info *tinfo;
11256 int graph = 0;
11257 unsigned long dummy;
11258 + void *stack_start;
11259
11260 if (!task)
11261 task = current;
11262 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11263 * current stack address. If the stacks consist of nested
11264 * exceptions
11265 */
11266 - tinfo = task_thread_info(task);
11267 for (;;) {
11268 char *id;
11269 unsigned long *estack_end;
11270 +
11271 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11272 &used, &id);
11273
11274 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11275 if (ops->stack(data, id) < 0)
11276 break;
11277
11278 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11279 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11280 data, estack_end, &graph);
11281 ops->stack(data, "<EOE>");
11282 /*
11283 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11284 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11285 if (ops->stack(data, "IRQ") < 0)
11286 break;
11287 - bp = ops->walk_stack(tinfo, stack, bp,
11288 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11289 ops, data, irq_stack_end, &graph);
11290 /*
11291 * We link to the next stack (which would be
11292 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11293 /*
11294 * This handles the process stack:
11295 */
11296 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11297 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11298 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11299 put_cpu();
11300 }
11301 EXPORT_SYMBOL(dump_trace);
11302 diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack.c linux-3.0.7/arch/x86/kernel/dumpstack.c
11303 --- linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11304 +++ linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11305 @@ -2,6 +2,9 @@
11306 * Copyright (C) 1991, 1992 Linus Torvalds
11307 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11308 */
11309 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11310 +#define __INCLUDED_BY_HIDESYM 1
11311 +#endif
11312 #include <linux/kallsyms.h>
11313 #include <linux/kprobes.h>
11314 #include <linux/uaccess.h>
11315 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11316 static void
11317 print_ftrace_graph_addr(unsigned long addr, void *data,
11318 const struct stacktrace_ops *ops,
11319 - struct thread_info *tinfo, int *graph)
11320 + struct task_struct *task, int *graph)
11321 {
11322 - struct task_struct *task = tinfo->task;
11323 unsigned long ret_addr;
11324 int index = task->curr_ret_stack;
11325
11326 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11327 static inline void
11328 print_ftrace_graph_addr(unsigned long addr, void *data,
11329 const struct stacktrace_ops *ops,
11330 - struct thread_info *tinfo, int *graph)
11331 + struct task_struct *task, int *graph)
11332 { }
11333 #endif
11334
11335 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11336 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11337 */
11338
11339 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11340 - void *p, unsigned int size, void *end)
11341 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11342 {
11343 - void *t = tinfo;
11344 if (end) {
11345 if (p < end && p >= (end-THREAD_SIZE))
11346 return 1;
11347 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11348 }
11349
11350 unsigned long
11351 -print_context_stack(struct thread_info *tinfo,
11352 +print_context_stack(struct task_struct *task, void *stack_start,
11353 unsigned long *stack, unsigned long bp,
11354 const struct stacktrace_ops *ops, void *data,
11355 unsigned long *end, int *graph)
11356 {
11357 struct stack_frame *frame = (struct stack_frame *)bp;
11358
11359 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11360 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11361 unsigned long addr;
11362
11363 addr = *stack;
11364 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11365 } else {
11366 ops->address(data, addr, 0);
11367 }
11368 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11369 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11370 }
11371 stack++;
11372 }
11373 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11374 EXPORT_SYMBOL_GPL(print_context_stack);
11375
11376 unsigned long
11377 -print_context_stack_bp(struct thread_info *tinfo,
11378 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11379 unsigned long *stack, unsigned long bp,
11380 const struct stacktrace_ops *ops, void *data,
11381 unsigned long *end, int *graph)
11382 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11383 struct stack_frame *frame = (struct stack_frame *)bp;
11384 unsigned long *ret_addr = &frame->return_address;
11385
11386 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11387 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11388 unsigned long addr = *ret_addr;
11389
11390 if (!__kernel_text_address(addr))
11391 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11392 ops->address(data, addr, 1);
11393 frame = frame->next_frame;
11394 ret_addr = &frame->return_address;
11395 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11396 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11397 }
11398
11399 return (unsigned long)frame;
11400 @@ -186,7 +186,7 @@ void dump_stack(void)
11401
11402 bp = stack_frame(current, NULL);
11403 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11404 - current->pid, current->comm, print_tainted(),
11405 + task_pid_nr(current), current->comm, print_tainted(),
11406 init_utsname()->release,
11407 (int)strcspn(init_utsname()->version, " "),
11408 init_utsname()->version);
11409 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11410 }
11411 EXPORT_SYMBOL_GPL(oops_begin);
11412
11413 +extern void gr_handle_kernel_exploit(void);
11414 +
11415 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11416 {
11417 if (regs && kexec_should_crash(current))
11418 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11419 panic("Fatal exception in interrupt");
11420 if (panic_on_oops)
11421 panic("Fatal exception");
11422 - do_exit(signr);
11423 +
11424 + gr_handle_kernel_exploit();
11425 +
11426 + do_group_exit(signr);
11427 }
11428
11429 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11430 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11431
11432 show_registers(regs);
11433 #ifdef CONFIG_X86_32
11434 - if (user_mode_vm(regs)) {
11435 + if (user_mode(regs)) {
11436 sp = regs->sp;
11437 ss = regs->ss & 0xffff;
11438 } else {
11439 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11440 unsigned long flags = oops_begin();
11441 int sig = SIGSEGV;
11442
11443 - if (!user_mode_vm(regs))
11444 + if (!user_mode(regs))
11445 report_bug(regs->ip, regs);
11446
11447 if (__die(str, regs, err))
11448 diff -urNp linux-3.0.7/arch/x86/kernel/early_printk.c linux-3.0.7/arch/x86/kernel/early_printk.c
11449 --- linux-3.0.7/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11450 +++ linux-3.0.7/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11451 @@ -7,6 +7,7 @@
11452 #include <linux/pci_regs.h>
11453 #include <linux/pci_ids.h>
11454 #include <linux/errno.h>
11455 +#include <linux/sched.h>
11456 #include <asm/io.h>
11457 #include <asm/processor.h>
11458 #include <asm/fcntl.h>
11459 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11460 int n;
11461 va_list ap;
11462
11463 + pax_track_stack();
11464 +
11465 va_start(ap, fmt);
11466 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11467 early_console->write(early_console, buf, n);
11468 diff -urNp linux-3.0.7/arch/x86/kernel/entry_32.S linux-3.0.7/arch/x86/kernel/entry_32.S
11469 --- linux-3.0.7/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11470 +++ linux-3.0.7/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11471 @@ -185,13 +185,146 @@
11472 /*CFI_REL_OFFSET gs, PT_GS*/
11473 .endm
11474 .macro SET_KERNEL_GS reg
11475 +
11476 +#ifdef CONFIG_CC_STACKPROTECTOR
11477 movl $(__KERNEL_STACK_CANARY), \reg
11478 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11479 + movl $(__USER_DS), \reg
11480 +#else
11481 + xorl \reg, \reg
11482 +#endif
11483 +
11484 movl \reg, %gs
11485 .endm
11486
11487 #endif /* CONFIG_X86_32_LAZY_GS */
11488
11489 -.macro SAVE_ALL
11490 +.macro pax_enter_kernel
11491 +#ifdef CONFIG_PAX_KERNEXEC
11492 + call pax_enter_kernel
11493 +#endif
11494 +.endm
11495 +
11496 +.macro pax_exit_kernel
11497 +#ifdef CONFIG_PAX_KERNEXEC
11498 + call pax_exit_kernel
11499 +#endif
11500 +.endm
11501 +
11502 +#ifdef CONFIG_PAX_KERNEXEC
11503 +ENTRY(pax_enter_kernel)
11504 +#ifdef CONFIG_PARAVIRT
11505 + pushl %eax
11506 + pushl %ecx
11507 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11508 + mov %eax, %esi
11509 +#else
11510 + mov %cr0, %esi
11511 +#endif
11512 + bts $16, %esi
11513 + jnc 1f
11514 + mov %cs, %esi
11515 + cmp $__KERNEL_CS, %esi
11516 + jz 3f
11517 + ljmp $__KERNEL_CS, $3f
11518 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11519 +2:
11520 +#ifdef CONFIG_PARAVIRT
11521 + mov %esi, %eax
11522 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11523 +#else
11524 + mov %esi, %cr0
11525 +#endif
11526 +3:
11527 +#ifdef CONFIG_PARAVIRT
11528 + popl %ecx
11529 + popl %eax
11530 +#endif
11531 + ret
11532 +ENDPROC(pax_enter_kernel)
11533 +
11534 +ENTRY(pax_exit_kernel)
11535 +#ifdef CONFIG_PARAVIRT
11536 + pushl %eax
11537 + pushl %ecx
11538 +#endif
11539 + mov %cs, %esi
11540 + cmp $__KERNEXEC_KERNEL_CS, %esi
11541 + jnz 2f
11542 +#ifdef CONFIG_PARAVIRT
11543 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11544 + mov %eax, %esi
11545 +#else
11546 + mov %cr0, %esi
11547 +#endif
11548 + btr $16, %esi
11549 + ljmp $__KERNEL_CS, $1f
11550 +1:
11551 +#ifdef CONFIG_PARAVIRT
11552 + mov %esi, %eax
11553 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11554 +#else
11555 + mov %esi, %cr0
11556 +#endif
11557 +2:
11558 +#ifdef CONFIG_PARAVIRT
11559 + popl %ecx
11560 + popl %eax
11561 +#endif
11562 + ret
11563 +ENDPROC(pax_exit_kernel)
11564 +#endif
11565 +
11566 +.macro pax_erase_kstack
11567 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11568 + call pax_erase_kstack
11569 +#endif
11570 +.endm
11571 +
11572 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11573 +/*
11574 + * ebp: thread_info
11575 + * ecx, edx: can be clobbered
11576 + */
11577 +ENTRY(pax_erase_kstack)
11578 + pushl %edi
11579 + pushl %eax
11580 +
11581 + mov TI_lowest_stack(%ebp), %edi
11582 + mov $-0xBEEF, %eax
11583 + std
11584 +
11585 +1: mov %edi, %ecx
11586 + and $THREAD_SIZE_asm - 1, %ecx
11587 + shr $2, %ecx
11588 + repne scasl
11589 + jecxz 2f
11590 +
11591 + cmp $2*16, %ecx
11592 + jc 2f
11593 +
11594 + mov $2*16, %ecx
11595 + repe scasl
11596 + jecxz 2f
11597 + jne 1b
11598 +
11599 +2: cld
11600 + mov %esp, %ecx
11601 + sub %edi, %ecx
11602 + shr $2, %ecx
11603 + rep stosl
11604 +
11605 + mov TI_task_thread_sp0(%ebp), %edi
11606 + sub $128, %edi
11607 + mov %edi, TI_lowest_stack(%ebp)
11608 +
11609 + popl %eax
11610 + popl %edi
11611 + ret
11612 +ENDPROC(pax_erase_kstack)
11613 +#endif
11614 +
11615 +.macro __SAVE_ALL _DS
11616 cld
11617 PUSH_GS
11618 pushl_cfi %fs
11619 @@ -214,7 +347,7 @@
11620 CFI_REL_OFFSET ecx, 0
11621 pushl_cfi %ebx
11622 CFI_REL_OFFSET ebx, 0
11623 - movl $(__USER_DS), %edx
11624 + movl $\_DS, %edx
11625 movl %edx, %ds
11626 movl %edx, %es
11627 movl $(__KERNEL_PERCPU), %edx
11628 @@ -222,6 +355,15 @@
11629 SET_KERNEL_GS %edx
11630 .endm
11631
11632 +.macro SAVE_ALL
11633 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11634 + __SAVE_ALL __KERNEL_DS
11635 + pax_enter_kernel
11636 +#else
11637 + __SAVE_ALL __USER_DS
11638 +#endif
11639 +.endm
11640 +
11641 .macro RESTORE_INT_REGS
11642 popl_cfi %ebx
11643 CFI_RESTORE ebx
11644 @@ -332,7 +474,15 @@ check_userspace:
11645 movb PT_CS(%esp), %al
11646 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11647 cmpl $USER_RPL, %eax
11648 +
11649 +#ifdef CONFIG_PAX_KERNEXEC
11650 + jae resume_userspace
11651 +
11652 + PAX_EXIT_KERNEL
11653 + jmp resume_kernel
11654 +#else
11655 jb resume_kernel # not returning to v8086 or userspace
11656 +#endif
11657
11658 ENTRY(resume_userspace)
11659 LOCKDEP_SYS_EXIT
11660 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11661 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11662 # int/exception return?
11663 jne work_pending
11664 - jmp restore_all
11665 + jmp restore_all_pax
11666 END(ret_from_exception)
11667
11668 #ifdef CONFIG_PREEMPT
11669 @@ -394,23 +544,34 @@ sysenter_past_esp:
11670 /*CFI_REL_OFFSET cs, 0*/
11671 /*
11672 * Push current_thread_info()->sysenter_return to the stack.
11673 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11674 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11675 */
11676 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11677 + pushl_cfi $0
11678 CFI_REL_OFFSET eip, 0
11679
11680 pushl_cfi %eax
11681 SAVE_ALL
11682 + GET_THREAD_INFO(%ebp)
11683 + movl TI_sysenter_return(%ebp),%ebp
11684 + movl %ebp,PT_EIP(%esp)
11685 ENABLE_INTERRUPTS(CLBR_NONE)
11686
11687 /*
11688 * Load the potential sixth argument from user stack.
11689 * Careful about security.
11690 */
11691 + movl PT_OLDESP(%esp),%ebp
11692 +
11693 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11694 + mov PT_OLDSS(%esp),%ds
11695 +1: movl %ds:(%ebp),%ebp
11696 + push %ss
11697 + pop %ds
11698 +#else
11699 cmpl $__PAGE_OFFSET-3,%ebp
11700 jae syscall_fault
11701 1: movl (%ebp),%ebp
11702 +#endif
11703 +
11704 movl %ebp,PT_EBP(%esp)
11705 .section __ex_table,"a"
11706 .align 4
11707 @@ -433,12 +594,24 @@ sysenter_do_call:
11708 testl $_TIF_ALLWORK_MASK, %ecx
11709 jne sysexit_audit
11710 sysenter_exit:
11711 +
11712 +#ifdef CONFIG_PAX_RANDKSTACK
11713 + pushl_cfi %eax
11714 + movl %esp, %eax
11715 + call pax_randomize_kstack
11716 + popl_cfi %eax
11717 +#endif
11718 +
11719 + pax_erase_kstack
11720 +
11721 /* if something modifies registers it must also disable sysexit */
11722 movl PT_EIP(%esp), %edx
11723 movl PT_OLDESP(%esp), %ecx
11724 xorl %ebp,%ebp
11725 TRACE_IRQS_ON
11726 1: mov PT_FS(%esp), %fs
11727 +2: mov PT_DS(%esp), %ds
11728 +3: mov PT_ES(%esp), %es
11729 PTGS_TO_GS
11730 ENABLE_INTERRUPTS_SYSEXIT
11731
11732 @@ -455,6 +628,9 @@ sysenter_audit:
11733 movl %eax,%edx /* 2nd arg: syscall number */
11734 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11735 call audit_syscall_entry
11736 +
11737 + pax_erase_kstack
11738 +
11739 pushl_cfi %ebx
11740 movl PT_EAX(%esp),%eax /* reload syscall number */
11741 jmp sysenter_do_call
11742 @@ -481,11 +657,17 @@ sysexit_audit:
11743
11744 CFI_ENDPROC
11745 .pushsection .fixup,"ax"
11746 -2: movl $0,PT_FS(%esp)
11747 +4: movl $0,PT_FS(%esp)
11748 + jmp 1b
11749 +5: movl $0,PT_DS(%esp)
11750 + jmp 1b
11751 +6: movl $0,PT_ES(%esp)
11752 jmp 1b
11753 .section __ex_table,"a"
11754 .align 4
11755 - .long 1b,2b
11756 + .long 1b,4b
11757 + .long 2b,5b
11758 + .long 3b,6b
11759 .popsection
11760 PTGS_TO_GS_EX
11761 ENDPROC(ia32_sysenter_target)
11762 @@ -518,6 +700,15 @@ syscall_exit:
11763 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11764 jne syscall_exit_work
11765
11766 +restore_all_pax:
11767 +
11768 +#ifdef CONFIG_PAX_RANDKSTACK
11769 + movl %esp, %eax
11770 + call pax_randomize_kstack
11771 +#endif
11772 +
11773 + pax_erase_kstack
11774 +
11775 restore_all:
11776 TRACE_IRQS_IRET
11777 restore_all_notrace:
11778 @@ -577,14 +768,34 @@ ldt_ss:
11779 * compensating for the offset by changing to the ESPFIX segment with
11780 * a base address that matches for the difference.
11781 */
11782 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11783 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11784 mov %esp, %edx /* load kernel esp */
11785 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11786 mov %dx, %ax /* eax: new kernel esp */
11787 sub %eax, %edx /* offset (low word is 0) */
11788 +#ifdef CONFIG_SMP
11789 + movl PER_CPU_VAR(cpu_number), %ebx
11790 + shll $PAGE_SHIFT_asm, %ebx
11791 + addl $cpu_gdt_table, %ebx
11792 +#else
11793 + movl $cpu_gdt_table, %ebx
11794 +#endif
11795 shr $16, %edx
11796 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11797 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11798 +
11799 +#ifdef CONFIG_PAX_KERNEXEC
11800 + mov %cr0, %esi
11801 + btr $16, %esi
11802 + mov %esi, %cr0
11803 +#endif
11804 +
11805 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11806 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11807 +
11808 +#ifdef CONFIG_PAX_KERNEXEC
11809 + bts $16, %esi
11810 + mov %esi, %cr0
11811 +#endif
11812 +
11813 pushl_cfi $__ESPFIX_SS
11814 pushl_cfi %eax /* new kernel esp */
11815 /* Disable interrupts, but do not irqtrace this section: we
11816 @@ -613,29 +824,23 @@ work_resched:
11817 movl TI_flags(%ebp), %ecx
11818 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11819 # than syscall tracing?
11820 - jz restore_all
11821 + jz restore_all_pax
11822 testb $_TIF_NEED_RESCHED, %cl
11823 jnz work_resched
11824
11825 work_notifysig: # deal with pending signals and
11826 # notify-resume requests
11827 + movl %esp, %eax
11828 #ifdef CONFIG_VM86
11829 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11830 - movl %esp, %eax
11831 - jne work_notifysig_v86 # returning to kernel-space or
11832 + jz 1f # returning to kernel-space or
11833 # vm86-space
11834 - xorl %edx, %edx
11835 - call do_notify_resume
11836 - jmp resume_userspace_sig
11837
11838 - ALIGN
11839 -work_notifysig_v86:
11840 pushl_cfi %ecx # save ti_flags for do_notify_resume
11841 call save_v86_state # %eax contains pt_regs pointer
11842 popl_cfi %ecx
11843 movl %eax, %esp
11844 -#else
11845 - movl %esp, %eax
11846 +1:
11847 #endif
11848 xorl %edx, %edx
11849 call do_notify_resume
11850 @@ -648,6 +853,9 @@ syscall_trace_entry:
11851 movl $-ENOSYS,PT_EAX(%esp)
11852 movl %esp, %eax
11853 call syscall_trace_enter
11854 +
11855 + pax_erase_kstack
11856 +
11857 /* What it returned is what we'll actually use. */
11858 cmpl $(nr_syscalls), %eax
11859 jnae syscall_call
11860 @@ -670,6 +878,10 @@ END(syscall_exit_work)
11861
11862 RING0_INT_FRAME # can't unwind into user space anyway
11863 syscall_fault:
11864 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11865 + push %ss
11866 + pop %ds
11867 +#endif
11868 GET_THREAD_INFO(%ebp)
11869 movl $-EFAULT,PT_EAX(%esp)
11870 jmp resume_userspace
11871 @@ -752,6 +964,36 @@ ptregs_clone:
11872 CFI_ENDPROC
11873 ENDPROC(ptregs_clone)
11874
11875 + ALIGN;
11876 +ENTRY(kernel_execve)
11877 + CFI_STARTPROC
11878 + pushl_cfi %ebp
11879 + sub $PT_OLDSS+4,%esp
11880 + pushl_cfi %edi
11881 + pushl_cfi %ecx
11882 + pushl_cfi %eax
11883 + lea 3*4(%esp),%edi
11884 + mov $PT_OLDSS/4+1,%ecx
11885 + xorl %eax,%eax
11886 + rep stosl
11887 + popl_cfi %eax
11888 + popl_cfi %ecx
11889 + popl_cfi %edi
11890 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11891 + pushl_cfi %esp
11892 + call sys_execve
11893 + add $4,%esp
11894 + CFI_ADJUST_CFA_OFFSET -4
11895 + GET_THREAD_INFO(%ebp)
11896 + test %eax,%eax
11897 + jz syscall_exit
11898 + add $PT_OLDSS+4,%esp
11899 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11900 + popl_cfi %ebp
11901 + ret
11902 + CFI_ENDPROC
11903 +ENDPROC(kernel_execve)
11904 +
11905 .macro FIXUP_ESPFIX_STACK
11906 /*
11907 * Switch back for ESPFIX stack to the normal zerobased stack
11908 @@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11909 * normal stack and adjusts ESP with the matching offset.
11910 */
11911 /* fixup the stack */
11912 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11913 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11914 +#ifdef CONFIG_SMP
11915 + movl PER_CPU_VAR(cpu_number), %ebx
11916 + shll $PAGE_SHIFT_asm, %ebx
11917 + addl $cpu_gdt_table, %ebx
11918 +#else
11919 + movl $cpu_gdt_table, %ebx
11920 +#endif
11921 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11922 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11923 shl $16, %eax
11924 addl %esp, %eax /* the adjusted stack pointer */
11925 pushl_cfi $__KERNEL_DS
11926 @@ -1213,7 +1462,6 @@ return_to_handler:
11927 jmp *%ecx
11928 #endif
11929
11930 -.section .rodata,"a"
11931 #include "syscall_table_32.S"
11932
11933 syscall_table_size=(.-sys_call_table)
11934 @@ -1259,9 +1507,12 @@ error_code:
11935 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11936 REG_TO_PTGS %ecx
11937 SET_KERNEL_GS %ecx
11938 - movl $(__USER_DS), %ecx
11939 + movl $(__KERNEL_DS), %ecx
11940 movl %ecx, %ds
11941 movl %ecx, %es
11942 +
11943 + pax_enter_kernel
11944 +
11945 TRACE_IRQS_OFF
11946 movl %esp,%eax # pt_regs pointer
11947 call *%edi
11948 @@ -1346,6 +1597,9 @@ nmi_stack_correct:
11949 xorl %edx,%edx # zero error code
11950 movl %esp,%eax # pt_regs pointer
11951 call do_nmi
11952 +
11953 + pax_exit_kernel
11954 +
11955 jmp restore_all_notrace
11956 CFI_ENDPROC
11957
11958 @@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11959 FIXUP_ESPFIX_STACK # %eax == %esp
11960 xorl %edx,%edx # zero error code
11961 call do_nmi
11962 +
11963 + pax_exit_kernel
11964 +
11965 RESTORE_REGS
11966 lss 12+4(%esp), %esp # back to espfix stack
11967 CFI_ADJUST_CFA_OFFSET -24
11968 diff -urNp linux-3.0.7/arch/x86/kernel/entry_64.S linux-3.0.7/arch/x86/kernel/entry_64.S
11969 --- linux-3.0.7/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11970 +++ linux-3.0.7/arch/x86/kernel/entry_64.S 2011-10-11 10:44:33.000000000 -0400
11971 @@ -53,6 +53,8 @@
11972 #include <asm/paravirt.h>
11973 #include <asm/ftrace.h>
11974 #include <asm/percpu.h>
11975 +#include <asm/pgtable.h>
11976 +#include <asm/alternative-asm.h>
11977
11978 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11979 #include <linux/elf-em.h>
11980 @@ -66,6 +68,7 @@
11981 #ifdef CONFIG_FUNCTION_TRACER
11982 #ifdef CONFIG_DYNAMIC_FTRACE
11983 ENTRY(mcount)
11984 + pax_force_retaddr
11985 retq
11986 END(mcount)
11987
11988 @@ -90,6 +93,7 @@ GLOBAL(ftrace_graph_call)
11989 #endif
11990
11991 GLOBAL(ftrace_stub)
11992 + pax_force_retaddr
11993 retq
11994 END(ftrace_caller)
11995
11996 @@ -110,6 +114,7 @@ ENTRY(mcount)
11997 #endif
11998
11999 GLOBAL(ftrace_stub)
12000 + pax_force_retaddr
12001 retq
12002
12003 trace:
12004 @@ -119,6 +124,7 @@ trace:
12005 movq 8(%rbp), %rsi
12006 subq $MCOUNT_INSN_SIZE, %rdi
12007
12008 + pax_force_fptr ftrace_trace_function
12009 call *ftrace_trace_function
12010
12011 MCOUNT_RESTORE_FRAME
12012 @@ -144,6 +150,7 @@ ENTRY(ftrace_graph_caller)
12013
12014 MCOUNT_RESTORE_FRAME
12015
12016 + pax_force_retaddr
12017 retq
12018 END(ftrace_graph_caller)
12019
12020 @@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12021 movq 8(%rsp), %rdx
12022 movq (%rsp), %rax
12023 addq $24, %rsp
12024 + pax_force_fptr %rdi
12025 jmp *%rdi
12026 #endif
12027
12028 @@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12029 ENDPROC(native_usergs_sysret64)
12030 #endif /* CONFIG_PARAVIRT */
12031
12032 + .macro ljmpq sel, off
12033 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12034 + .byte 0x48; ljmp *1234f(%rip)
12035 + .pushsection .rodata
12036 + .align 16
12037 + 1234: .quad \off; .word \sel
12038 + .popsection
12039 +#else
12040 + pushq $\sel
12041 + pushq $\off
12042 + lretq
12043 +#endif
12044 + .endm
12045 +
12046 + .macro pax_enter_kernel
12047 +#ifdef CONFIG_PAX_KERNEXEC
12048 + call pax_enter_kernel
12049 +#endif
12050 + .endm
12051 +
12052 + .macro pax_exit_kernel
12053 +#ifdef CONFIG_PAX_KERNEXEC
12054 + call pax_exit_kernel
12055 +#endif
12056 + .endm
12057 +
12058 +#ifdef CONFIG_PAX_KERNEXEC
12059 +ENTRY(pax_enter_kernel)
12060 + pushq %rdi
12061 +
12062 +#ifdef CONFIG_PARAVIRT
12063 + PV_SAVE_REGS(CLBR_RDI)
12064 +#endif
12065 +
12066 + GET_CR0_INTO_RDI
12067 + bts $16,%rdi
12068 + jnc 1f
12069 + mov %cs,%edi
12070 + cmp $__KERNEL_CS,%edi
12071 + jz 3f
12072 + ljmpq __KERNEL_CS,3f
12073 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
12074 +2: SET_RDI_INTO_CR0
12075 +3:
12076 +
12077 +#ifdef CONFIG_PARAVIRT
12078 + PV_RESTORE_REGS(CLBR_RDI)
12079 +#endif
12080 +
12081 + popq %rdi
12082 + pax_force_retaddr
12083 + retq
12084 +ENDPROC(pax_enter_kernel)
12085 +
12086 +ENTRY(pax_exit_kernel)
12087 + pushq %rdi
12088 +
12089 +#ifdef CONFIG_PARAVIRT
12090 + PV_SAVE_REGS(CLBR_RDI)
12091 +#endif
12092 +
12093 + mov %cs,%rdi
12094 + cmp $__KERNEXEC_KERNEL_CS,%edi
12095 + jnz 2f
12096 + GET_CR0_INTO_RDI
12097 + btr $16,%rdi
12098 + ljmpq __KERNEL_CS,1f
12099 +1: SET_RDI_INTO_CR0
12100 +2:
12101 +
12102 +#ifdef CONFIG_PARAVIRT
12103 + PV_RESTORE_REGS(CLBR_RDI);
12104 +#endif
12105 +
12106 + popq %rdi
12107 + pax_force_retaddr
12108 + retq
12109 +ENDPROC(pax_exit_kernel)
12110 +#endif
12111 +
12112 + .macro pax_enter_kernel_user
12113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12114 + call pax_enter_kernel_user
12115 +#endif
12116 + .endm
12117 +
12118 + .macro pax_exit_kernel_user
12119 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12120 + call pax_exit_kernel_user
12121 +#endif
12122 +#ifdef CONFIG_PAX_RANDKSTACK
12123 + push %rax
12124 + call pax_randomize_kstack
12125 + pop %rax
12126 +#endif
12127 + .endm
12128 +
12129 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12130 +ENTRY(pax_enter_kernel_user)
12131 + pushq %rdi
12132 + pushq %rbx
12133 +
12134 +#ifdef CONFIG_PARAVIRT
12135 + PV_SAVE_REGS(CLBR_RDI)
12136 +#endif
12137 +
12138 + GET_CR3_INTO_RDI
12139 + mov %rdi,%rbx
12140 + add $__START_KERNEL_map,%rbx
12141 + sub phys_base(%rip),%rbx
12142 +
12143 +#ifdef CONFIG_PARAVIRT
12144 + pushq %rdi
12145 + cmpl $0, pv_info+PARAVIRT_enabled
12146 + jz 1f
12147 + i = 0
12148 + .rept USER_PGD_PTRS
12149 + mov i*8(%rbx),%rsi
12150 + mov $0,%sil
12151 + lea i*8(%rbx),%rdi
12152 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12153 + i = i + 1
12154 + .endr
12155 + jmp 2f
12156 +1:
12157 +#endif
12158 +
12159 + i = 0
12160 + .rept USER_PGD_PTRS
12161 + movb $0,i*8(%rbx)
12162 + i = i + 1
12163 + .endr
12164 +
12165 +#ifdef CONFIG_PARAVIRT
12166 +2: popq %rdi
12167 +#endif
12168 + SET_RDI_INTO_CR3
12169 +
12170 +#ifdef CONFIG_PAX_KERNEXEC
12171 + GET_CR0_INTO_RDI
12172 + bts $16,%rdi
12173 + SET_RDI_INTO_CR0
12174 +#endif
12175 +
12176 +#ifdef CONFIG_PARAVIRT
12177 + PV_RESTORE_REGS(CLBR_RDI)
12178 +#endif
12179 +
12180 + popq %rbx
12181 + popq %rdi
12182 + pax_force_retaddr
12183 + retq
12184 +ENDPROC(pax_enter_kernel_user)
12185 +
12186 +ENTRY(pax_exit_kernel_user)
12187 + push %rdi
12188 +
12189 +#ifdef CONFIG_PARAVIRT
12190 + pushq %rbx
12191 + PV_SAVE_REGS(CLBR_RDI)
12192 +#endif
12193 +
12194 +#ifdef CONFIG_PAX_KERNEXEC
12195 + GET_CR0_INTO_RDI
12196 + btr $16,%rdi
12197 + SET_RDI_INTO_CR0
12198 +#endif
12199 +
12200 + GET_CR3_INTO_RDI
12201 + add $__START_KERNEL_map,%rdi
12202 + sub phys_base(%rip),%rdi
12203 +
12204 +#ifdef CONFIG_PARAVIRT
12205 + cmpl $0, pv_info+PARAVIRT_enabled
12206 + jz 1f
12207 + mov %rdi,%rbx
12208 + i = 0
12209 + .rept USER_PGD_PTRS
12210 + mov i*8(%rbx),%rsi
12211 + mov $0x67,%sil
12212 + lea i*8(%rbx),%rdi
12213 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12214 + i = i + 1
12215 + .endr
12216 + jmp 2f
12217 +1:
12218 +#endif
12219 +
12220 + i = 0
12221 + .rept USER_PGD_PTRS
12222 + movb $0x67,i*8(%rdi)
12223 + i = i + 1
12224 + .endr
12225 +
12226 +#ifdef CONFIG_PARAVIRT
12227 +2: PV_RESTORE_REGS(CLBR_RDI)
12228 + popq %rbx
12229 +#endif
12230 +
12231 + popq %rdi
12232 + pax_force_retaddr
12233 + retq
12234 +ENDPROC(pax_exit_kernel_user)
12235 +#endif
12236 +
12237 + .macro pax_erase_kstack
12238 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12239 + call pax_erase_kstack
12240 +#endif
12241 + .endm
12242 +
12243 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12244 +/*
12245 + * r10: thread_info
12246 + * rcx, rdx: can be clobbered
12247 + */
12248 +ENTRY(pax_erase_kstack)
12249 + pushq %rdi
12250 + pushq %rax
12251 + pushq %r10
12252 +
12253 + GET_THREAD_INFO(%r10)
12254 + mov TI_lowest_stack(%r10), %rdi
12255 + mov $-0xBEEF, %rax
12256 + std
12257 +
12258 +1: mov %edi, %ecx
12259 + and $THREAD_SIZE_asm - 1, %ecx
12260 + shr $3, %ecx
12261 + repne scasq
12262 + jecxz 2f
12263 +
12264 + cmp $2*8, %ecx
12265 + jc 2f
12266 +
12267 + mov $2*8, %ecx
12268 + repe scasq
12269 + jecxz 2f
12270 + jne 1b
12271 +
12272 +2: cld
12273 + mov %esp, %ecx
12274 + sub %edi, %ecx
12275 +
12276 + cmp $THREAD_SIZE_asm, %rcx
12277 + jb 3f
12278 + ud2
12279 +3:
12280 +
12281 + shr $3, %ecx
12282 + rep stosq
12283 +
12284 + mov TI_task_thread_sp0(%r10), %rdi
12285 + sub $256, %rdi
12286 + mov %rdi, TI_lowest_stack(%r10)
12287 +
12288 + popq %r10
12289 + popq %rax
12290 + popq %rdi
12291 + pax_force_retaddr
12292 + ret
12293 +ENDPROC(pax_erase_kstack)
12294 +#endif
12295
12296 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12297 #ifdef CONFIG_TRACE_IRQFLAGS
12298 @@ -318,7 +589,7 @@ ENTRY(save_args)
12299 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12300 movq_cfi rbp, 8 /* push %rbp */
12301 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12302 - testl $3, CS(%rdi)
12303 + testb $3, CS(%rdi)
12304 je 1f
12305 SWAPGS
12306 /*
12307 @@ -338,6 +609,7 @@ ENTRY(save_args)
12308 * We entered an interrupt context - irqs are off:
12309 */
12310 2: TRACE_IRQS_OFF
12311 + pax_force_retaddr
12312 ret
12313 CFI_ENDPROC
12314 END(save_args)
12315 @@ -354,6 +626,7 @@ ENTRY(save_rest)
12316 movq_cfi r15, R15+16
12317 movq %r11, 8(%rsp) /* return address */
12318 FIXUP_TOP_OF_STACK %r11, 16
12319 + pax_force_retaddr
12320 ret
12321 CFI_ENDPROC
12322 END(save_rest)
12323 @@ -385,7 +658,8 @@ ENTRY(save_paranoid)
12324 js 1f /* negative -> in kernel */
12325 SWAPGS
12326 xorl %ebx,%ebx
12327 -1: ret
12328 +1: pax_force_retaddr
12329 + ret
12330 CFI_ENDPROC
12331 END(save_paranoid)
12332 .popsection
12333 @@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12334
12335 RESTORE_REST
12336
12337 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12338 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12339 je int_ret_from_sys_call
12340
12341 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12342 @@ -455,7 +729,7 @@ END(ret_from_fork)
12343 ENTRY(system_call)
12344 CFI_STARTPROC simple
12345 CFI_SIGNAL_FRAME
12346 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12347 + CFI_DEF_CFA rsp,0
12348 CFI_REGISTER rip,rcx
12349 /*CFI_REGISTER rflags,r11*/
12350 SWAPGS_UNSAFE_STACK
12351 @@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12352
12353 movq %rsp,PER_CPU_VAR(old_rsp)
12354 movq PER_CPU_VAR(kernel_stack),%rsp
12355 + pax_enter_kernel_user
12356 /*
12357 * No need to follow this irqs off/on section - it's straight
12358 * and short:
12359 */
12360 ENABLE_INTERRUPTS(CLBR_NONE)
12361 - SAVE_ARGS 8,1
12362 + SAVE_ARGS 8*6,1
12363 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12364 movq %rcx,RIP-ARGOFFSET(%rsp)
12365 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12366 @@ -502,6 +777,8 @@ sysret_check:
12367 andl %edi,%edx
12368 jnz sysret_careful
12369 CFI_REMEMBER_STATE
12370 + pax_exit_kernel_user
12371 + pax_erase_kstack
12372 /*
12373 * sysretq will re-enable interrupts:
12374 */
12375 @@ -560,6 +837,9 @@ auditsys:
12376 movq %rax,%rsi /* 2nd arg: syscall number */
12377 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12378 call audit_syscall_entry
12379 +
12380 + pax_erase_kstack
12381 +
12382 LOAD_ARGS 0 /* reload call-clobbered registers */
12383 jmp system_call_fastpath
12384
12385 @@ -590,6 +870,9 @@ tracesys:
12386 FIXUP_TOP_OF_STACK %rdi
12387 movq %rsp,%rdi
12388 call syscall_trace_enter
12389 +
12390 + pax_erase_kstack
12391 +
12392 /*
12393 * Reload arg registers from stack in case ptrace changed them.
12394 * We don't reload %rax because syscall_trace_enter() returned
12395 @@ -611,7 +894,7 @@ tracesys:
12396 GLOBAL(int_ret_from_sys_call)
12397 DISABLE_INTERRUPTS(CLBR_NONE)
12398 TRACE_IRQS_OFF
12399 - testl $3,CS-ARGOFFSET(%rsp)
12400 + testb $3,CS-ARGOFFSET(%rsp)
12401 je retint_restore_args
12402 movl $_TIF_ALLWORK_MASK,%edi
12403 /* edi: mask to check */
12404 @@ -702,6 +985,7 @@ ENTRY(ptregscall_common)
12405 movq_cfi_restore R12+8, r12
12406 movq_cfi_restore RBP+8, rbp
12407 movq_cfi_restore RBX+8, rbx
12408 + pax_force_retaddr
12409 ret $REST_SKIP /* pop extended registers */
12410 CFI_ENDPROC
12411 END(ptregscall_common)
12412 @@ -793,6 +1077,16 @@ END(interrupt)
12413 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12414 call save_args
12415 PARTIAL_FRAME 0
12416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12417 + testb $3, CS(%rdi)
12418 + jnz 1f
12419 + pax_enter_kernel
12420 + jmp 2f
12421 +1: pax_enter_kernel_user
12422 +2:
12423 +#else
12424 + pax_enter_kernel
12425 +#endif
12426 call \func
12427 .endm
12428
12429 @@ -825,7 +1119,7 @@ ret_from_intr:
12430 CFI_ADJUST_CFA_OFFSET -8
12431 exit_intr:
12432 GET_THREAD_INFO(%rcx)
12433 - testl $3,CS-ARGOFFSET(%rsp)
12434 + testb $3,CS-ARGOFFSET(%rsp)
12435 je retint_kernel
12436
12437 /* Interrupt came from user space */
12438 @@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12439 * The iretq could re-enable interrupts:
12440 */
12441 DISABLE_INTERRUPTS(CLBR_ANY)
12442 + pax_exit_kernel_user
12443 + pax_erase_kstack
12444 TRACE_IRQS_IRETQ
12445 SWAPGS
12446 jmp restore_args
12447
12448 retint_restore_args: /* return to kernel space */
12449 DISABLE_INTERRUPTS(CLBR_ANY)
12450 + pax_exit_kernel
12451 + pax_force_retaddr RIP-ARGOFFSET
12452 /*
12453 * The iretq could re-enable interrupts:
12454 */
12455 @@ -1027,6 +1325,16 @@ ENTRY(\sym)
12456 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12457 call error_entry
12458 DEFAULT_FRAME 0
12459 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12460 + testb $3, CS(%rsp)
12461 + jnz 1f
12462 + pax_enter_kernel
12463 + jmp 2f
12464 +1: pax_enter_kernel_user
12465 +2:
12466 +#else
12467 + pax_enter_kernel
12468 +#endif
12469 movq %rsp,%rdi /* pt_regs pointer */
12470 xorl %esi,%esi /* no error code */
12471 call \do_sym
12472 @@ -1044,6 +1352,16 @@ ENTRY(\sym)
12473 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12474 call save_paranoid
12475 TRACE_IRQS_OFF
12476 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12477 + testb $3, CS(%rsp)
12478 + jnz 1f
12479 + pax_enter_kernel
12480 + jmp 2f
12481 +1: pax_enter_kernel_user
12482 +2:
12483 +#else
12484 + pax_enter_kernel
12485 +#endif
12486 movq %rsp,%rdi /* pt_regs pointer */
12487 xorl %esi,%esi /* no error code */
12488 call \do_sym
12489 @@ -1052,7 +1370,7 @@ ENTRY(\sym)
12490 END(\sym)
12491 .endm
12492
12493 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12494 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12495 .macro paranoidzeroentry_ist sym do_sym ist
12496 ENTRY(\sym)
12497 INTR_FRAME
12498 @@ -1062,8 +1380,24 @@ ENTRY(\sym)
12499 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12500 call save_paranoid
12501 TRACE_IRQS_OFF
12502 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12503 + testb $3, CS(%rsp)
12504 + jnz 1f
12505 + pax_enter_kernel
12506 + jmp 2f
12507 +1: pax_enter_kernel_user
12508 +2:
12509 +#else
12510 + pax_enter_kernel
12511 +#endif
12512 movq %rsp,%rdi /* pt_regs pointer */
12513 xorl %esi,%esi /* no error code */
12514 +#ifdef CONFIG_SMP
12515 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12516 + lea init_tss(%r12), %r12
12517 +#else
12518 + lea init_tss(%rip), %r12
12519 +#endif
12520 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12521 call \do_sym
12522 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12523 @@ -1080,6 +1414,16 @@ ENTRY(\sym)
12524 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12525 call error_entry
12526 DEFAULT_FRAME 0
12527 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12528 + testb $3, CS(%rsp)
12529 + jnz 1f
12530 + pax_enter_kernel
12531 + jmp 2f
12532 +1: pax_enter_kernel_user
12533 +2:
12534 +#else
12535 + pax_enter_kernel
12536 +#endif
12537 movq %rsp,%rdi /* pt_regs pointer */
12538 movq ORIG_RAX(%rsp),%rsi /* get error code */
12539 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12540 @@ -1099,6 +1443,16 @@ ENTRY(\sym)
12541 call save_paranoid
12542 DEFAULT_FRAME 0
12543 TRACE_IRQS_OFF
12544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12545 + testb $3, CS(%rsp)
12546 + jnz 1f
12547 + pax_enter_kernel
12548 + jmp 2f
12549 +1: pax_enter_kernel_user
12550 +2:
12551 +#else
12552 + pax_enter_kernel
12553 +#endif
12554 movq %rsp,%rdi /* pt_regs pointer */
12555 movq ORIG_RAX(%rsp),%rsi /* get error code */
12556 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12557 @@ -1134,6 +1488,7 @@ gs_change:
12558 2: mfence /* workaround */
12559 SWAPGS
12560 popfq_cfi
12561 + pax_force_retaddr
12562 ret
12563 CFI_ENDPROC
12564 END(native_load_gs_index)
12565 @@ -1158,6 +1513,7 @@ ENTRY(kernel_thread_helper)
12566 * Here we are in the child and the registers are set as they were
12567 * at kernel_thread() invocation in the parent.
12568 */
12569 + pax_force_fptr %rsi
12570 call *%rsi
12571 # exit
12572 mov %eax, %edi
12573 @@ -1193,9 +1549,10 @@ ENTRY(kernel_execve)
12574 je int_ret_from_sys_call
12575 RESTORE_ARGS
12576 UNFAKE_STACK_FRAME
12577 + pax_force_retaddr
12578 ret
12579 CFI_ENDPROC
12580 -END(kernel_execve)
12581 +ENDPROC(kernel_execve)
12582
12583 /* Call softirq on interrupt stack. Interrupts are off. */
12584 ENTRY(call_softirq)
12585 @@ -1213,9 +1570,10 @@ ENTRY(call_softirq)
12586 CFI_DEF_CFA_REGISTER rsp
12587 CFI_ADJUST_CFA_OFFSET -8
12588 decl PER_CPU_VAR(irq_count)
12589 + pax_force_retaddr
12590 ret
12591 CFI_ENDPROC
12592 -END(call_softirq)
12593 +ENDPROC(call_softirq)
12594
12595 #ifdef CONFIG_XEN
12596 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
12597 @@ -1361,16 +1719,31 @@ ENTRY(paranoid_exit)
12598 TRACE_IRQS_OFF
12599 testl %ebx,%ebx /* swapgs needed? */
12600 jnz paranoid_restore
12601 - testl $3,CS(%rsp)
12602 + testb $3,CS(%rsp)
12603 jnz paranoid_userspace
12604 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12605 + pax_exit_kernel
12606 + TRACE_IRQS_IRETQ 0
12607 + SWAPGS_UNSAFE_STACK
12608 + RESTORE_ALL 8
12609 + pax_force_retaddr
12610 + jmp irq_return
12611 +#endif
12612 paranoid_swapgs:
12613 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12614 + pax_exit_kernel_user
12615 +#else
12616 + pax_exit_kernel
12617 +#endif
12618 TRACE_IRQS_IRETQ 0
12619 SWAPGS_UNSAFE_STACK
12620 RESTORE_ALL 8
12621 jmp irq_return
12622 paranoid_restore:
12623 + pax_exit_kernel
12624 TRACE_IRQS_IRETQ 0
12625 RESTORE_ALL 8
12626 + pax_force_retaddr
12627 jmp irq_return
12628 paranoid_userspace:
12629 GET_THREAD_INFO(%rcx)
12630 @@ -1426,12 +1799,13 @@ ENTRY(error_entry)
12631 movq_cfi r14, R14+8
12632 movq_cfi r15, R15+8
12633 xorl %ebx,%ebx
12634 - testl $3,CS+8(%rsp)
12635 + testb $3,CS+8(%rsp)
12636 je error_kernelspace
12637 error_swapgs:
12638 SWAPGS
12639 error_sti:
12640 TRACE_IRQS_OFF
12641 + pax_force_retaddr
12642 ret
12643
12644 /*
12645 @@ -1490,6 +1864,16 @@ ENTRY(nmi)
12646 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12647 call save_paranoid
12648 DEFAULT_FRAME 0
12649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12650 + testb $3, CS(%rsp)
12651 + jnz 1f
12652 + pax_enter_kernel
12653 + jmp 2f
12654 +1: pax_enter_kernel_user
12655 +2:
12656 +#else
12657 + pax_enter_kernel
12658 +#endif
12659 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12660 movq %rsp,%rdi
12661 movq $-1,%rsi
12662 @@ -1500,12 +1884,28 @@ ENTRY(nmi)
12663 DISABLE_INTERRUPTS(CLBR_NONE)
12664 testl %ebx,%ebx /* swapgs needed? */
12665 jnz nmi_restore
12666 - testl $3,CS(%rsp)
12667 + testb $3,CS(%rsp)
12668 jnz nmi_userspace
12669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12670 + pax_exit_kernel
12671 + SWAPGS_UNSAFE_STACK
12672 + RESTORE_ALL 8
12673 + pax_force_retaddr
12674 + jmp irq_return
12675 +#endif
12676 nmi_swapgs:
12677 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12678 + pax_exit_kernel_user
12679 +#else
12680 + pax_exit_kernel
12681 +#endif
12682 SWAPGS_UNSAFE_STACK
12683 + RESTORE_ALL 8
12684 + jmp irq_return
12685 nmi_restore:
12686 + pax_exit_kernel
12687 RESTORE_ALL 8
12688 + pax_force_retaddr
12689 jmp irq_return
12690 nmi_userspace:
12691 GET_THREAD_INFO(%rcx)
12692 diff -urNp linux-3.0.7/arch/x86/kernel/ftrace.c linux-3.0.7/arch/x86/kernel/ftrace.c
12693 --- linux-3.0.7/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12694 +++ linux-3.0.7/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12695 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12696 static const void *mod_code_newcode; /* holds the text to write to the IP */
12697
12698 static unsigned nmi_wait_count;
12699 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12700 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12701
12702 int ftrace_arch_read_dyn_info(char *buf, int size)
12703 {
12704 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12705
12706 r = snprintf(buf, size, "%u %u",
12707 nmi_wait_count,
12708 - atomic_read(&nmi_update_count));
12709 + atomic_read_unchecked(&nmi_update_count));
12710 return r;
12711 }
12712
12713 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12714
12715 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12716 smp_rmb();
12717 + pax_open_kernel();
12718 ftrace_mod_code();
12719 - atomic_inc(&nmi_update_count);
12720 + pax_close_kernel();
12721 + atomic_inc_unchecked(&nmi_update_count);
12722 }
12723 /* Must have previous changes seen before executions */
12724 smp_mb();
12725 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12726 {
12727 unsigned char replaced[MCOUNT_INSN_SIZE];
12728
12729 + ip = ktla_ktva(ip);
12730 +
12731 /*
12732 * Note: Due to modules and __init, code can
12733 * disappear and change, we need to protect against faulting
12734 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12735 unsigned char old[MCOUNT_INSN_SIZE], *new;
12736 int ret;
12737
12738 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12739 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12740 new = ftrace_call_replace(ip, (unsigned long)func);
12741 ret = ftrace_modify_code(ip, old, new);
12742
12743 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12744 {
12745 unsigned char code[MCOUNT_INSN_SIZE];
12746
12747 + ip = ktla_ktva(ip);
12748 +
12749 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12750 return -EFAULT;
12751
12752 diff -urNp linux-3.0.7/arch/x86/kernel/head32.c linux-3.0.7/arch/x86/kernel/head32.c
12753 --- linux-3.0.7/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12754 +++ linux-3.0.7/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12755 @@ -19,6 +19,7 @@
12756 #include <asm/io_apic.h>
12757 #include <asm/bios_ebda.h>
12758 #include <asm/tlbflush.h>
12759 +#include <asm/boot.h>
12760
12761 static void __init i386_default_early_setup(void)
12762 {
12763 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12764 {
12765 memblock_init();
12766
12767 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12768 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12769
12770 #ifdef CONFIG_BLK_DEV_INITRD
12771 /* Reserve INITRD */
12772 diff -urNp linux-3.0.7/arch/x86/kernel/head_32.S linux-3.0.7/arch/x86/kernel/head_32.S
12773 --- linux-3.0.7/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12774 +++ linux-3.0.7/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12775 @@ -25,6 +25,12 @@
12776 /* Physical address */
12777 #define pa(X) ((X) - __PAGE_OFFSET)
12778
12779 +#ifdef CONFIG_PAX_KERNEXEC
12780 +#define ta(X) (X)
12781 +#else
12782 +#define ta(X) ((X) - __PAGE_OFFSET)
12783 +#endif
12784 +
12785 /*
12786 * References to members of the new_cpu_data structure.
12787 */
12788 @@ -54,11 +60,7 @@
12789 * and small than max_low_pfn, otherwise will waste some page table entries
12790 */
12791
12792 -#if PTRS_PER_PMD > 1
12793 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12794 -#else
12795 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12796 -#endif
12797 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12798
12799 /* Number of possible pages in the lowmem region */
12800 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12801 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12802 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12803
12804 /*
12805 + * Real beginning of normal "text" segment
12806 + */
12807 +ENTRY(stext)
12808 +ENTRY(_stext)
12809 +
12810 +/*
12811 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12812 * %esi points to the real-mode code as a 32-bit pointer.
12813 * CS and DS must be 4 GB flat segments, but we don't depend on
12814 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12815 * can.
12816 */
12817 __HEAD
12818 +
12819 +#ifdef CONFIG_PAX_KERNEXEC
12820 + jmp startup_32
12821 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12822 +.fill PAGE_SIZE-5,1,0xcc
12823 +#endif
12824 +
12825 ENTRY(startup_32)
12826 movl pa(stack_start),%ecx
12827
12828 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12829 2:
12830 leal -__PAGE_OFFSET(%ecx),%esp
12831
12832 +#ifdef CONFIG_SMP
12833 + movl $pa(cpu_gdt_table),%edi
12834 + movl $__per_cpu_load,%eax
12835 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12836 + rorl $16,%eax
12837 + movb %al,__KERNEL_PERCPU + 4(%edi)
12838 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12839 + movl $__per_cpu_end - 1,%eax
12840 + subl $__per_cpu_start,%eax
12841 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12842 +#endif
12843 +
12844 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12845 + movl $NR_CPUS,%ecx
12846 + movl $pa(cpu_gdt_table),%edi
12847 +1:
12848 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12849 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12850 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12851 + addl $PAGE_SIZE_asm,%edi
12852 + loop 1b
12853 +#endif
12854 +
12855 +#ifdef CONFIG_PAX_KERNEXEC
12856 + movl $pa(boot_gdt),%edi
12857 + movl $__LOAD_PHYSICAL_ADDR,%eax
12858 + movw %ax,__BOOT_CS + 2(%edi)
12859 + rorl $16,%eax
12860 + movb %al,__BOOT_CS + 4(%edi)
12861 + movb %ah,__BOOT_CS + 7(%edi)
12862 + rorl $16,%eax
12863 +
12864 + ljmp $(__BOOT_CS),$1f
12865 +1:
12866 +
12867 + movl $NR_CPUS,%ecx
12868 + movl $pa(cpu_gdt_table),%edi
12869 + addl $__PAGE_OFFSET,%eax
12870 +1:
12871 + movw %ax,__KERNEL_CS + 2(%edi)
12872 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12873 + rorl $16,%eax
12874 + movb %al,__KERNEL_CS + 4(%edi)
12875 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12876 + movb %ah,__KERNEL_CS + 7(%edi)
12877 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12878 + rorl $16,%eax
12879 + addl $PAGE_SIZE_asm,%edi
12880 + loop 1b
12881 +#endif
12882 +
12883 /*
12884 * Clear BSS first so that there are no surprises...
12885 */
12886 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12887 movl %eax, pa(max_pfn_mapped)
12888
12889 /* Do early initialization of the fixmap area */
12890 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12891 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12892 +#ifdef CONFIG_COMPAT_VDSO
12893 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12894 +#else
12895 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12896 +#endif
12897 #else /* Not PAE */
12898
12899 page_pde_offset = (__PAGE_OFFSET >> 20);
12900 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12901 movl %eax, pa(max_pfn_mapped)
12902
12903 /* Do early initialization of the fixmap area */
12904 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12905 - movl %eax,pa(initial_page_table+0xffc)
12906 +#ifdef CONFIG_COMPAT_VDSO
12907 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12908 +#else
12909 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12910 +#endif
12911 #endif
12912
12913 #ifdef CONFIG_PARAVIRT
12914 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12915 cmpl $num_subarch_entries, %eax
12916 jae bad_subarch
12917
12918 - movl pa(subarch_entries)(,%eax,4), %eax
12919 - subl $__PAGE_OFFSET, %eax
12920 - jmp *%eax
12921 + jmp *pa(subarch_entries)(,%eax,4)
12922
12923 bad_subarch:
12924 WEAK(lguest_entry)
12925 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12926 __INITDATA
12927
12928 subarch_entries:
12929 - .long default_entry /* normal x86/PC */
12930 - .long lguest_entry /* lguest hypervisor */
12931 - .long xen_entry /* Xen hypervisor */
12932 - .long default_entry /* Moorestown MID */
12933 + .long ta(default_entry) /* normal x86/PC */
12934 + .long ta(lguest_entry) /* lguest hypervisor */
12935 + .long ta(xen_entry) /* Xen hypervisor */
12936 + .long ta(default_entry) /* Moorestown MID */
12937 num_subarch_entries = (. - subarch_entries) / 4
12938 .previous
12939 #else
12940 @@ -312,6 +382,7 @@ default_entry:
12941 orl %edx,%eax
12942 movl %eax,%cr4
12943
12944 +#ifdef CONFIG_X86_PAE
12945 testb $X86_CR4_PAE, %al # check if PAE is enabled
12946 jz 6f
12947
12948 @@ -340,6 +411,9 @@ default_entry:
12949 /* Make changes effective */
12950 wrmsr
12951
12952 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12953 +#endif
12954 +
12955 6:
12956
12957 /*
12958 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12959 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12960 movl %eax,%ss # after changing gdt.
12961
12962 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12963 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12964 movl %eax,%ds
12965 movl %eax,%es
12966
12967 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12968 */
12969 cmpb $0,ready
12970 jne 1f
12971 - movl $gdt_page,%eax
12972 + movl $cpu_gdt_table,%eax
12973 movl $stack_canary,%ecx
12974 +#ifdef CONFIG_SMP
12975 + addl $__per_cpu_load,%ecx
12976 +#endif
12977 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12978 shrl $16, %ecx
12979 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12980 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12981 1:
12982 -#endif
12983 movl $(__KERNEL_STACK_CANARY),%eax
12984 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12985 + movl $(__USER_DS),%eax
12986 +#else
12987 + xorl %eax,%eax
12988 +#endif
12989 movl %eax,%gs
12990
12991 xorl %eax,%eax # Clear LDT
12992 @@ -558,22 +639,22 @@ early_page_fault:
12993 jmp early_fault
12994
12995 early_fault:
12996 - cld
12997 #ifdef CONFIG_PRINTK
12998 + cmpl $1,%ss:early_recursion_flag
12999 + je hlt_loop
13000 + incl %ss:early_recursion_flag
13001 + cld
13002 pusha
13003 movl $(__KERNEL_DS),%eax
13004 movl %eax,%ds
13005 movl %eax,%es
13006 - cmpl $2,early_recursion_flag
13007 - je hlt_loop
13008 - incl early_recursion_flag
13009 movl %cr2,%eax
13010 pushl %eax
13011 pushl %edx /* trapno */
13012 pushl $fault_msg
13013 call printk
13014 +; call dump_stack
13015 #endif
13016 - call dump_stack
13017 hlt_loop:
13018 hlt
13019 jmp hlt_loop
13020 @@ -581,8 +662,11 @@ hlt_loop:
13021 /* This is the default interrupt "handler" :-) */
13022 ALIGN
13023 ignore_int:
13024 - cld
13025 #ifdef CONFIG_PRINTK
13026 + cmpl $2,%ss:early_recursion_flag
13027 + je hlt_loop
13028 + incl %ss:early_recursion_flag
13029 + cld
13030 pushl %eax
13031 pushl %ecx
13032 pushl %edx
13033 @@ -591,9 +675,6 @@ ignore_int:
13034 movl $(__KERNEL_DS),%eax
13035 movl %eax,%ds
13036 movl %eax,%es
13037 - cmpl $2,early_recursion_flag
13038 - je hlt_loop
13039 - incl early_recursion_flag
13040 pushl 16(%esp)
13041 pushl 24(%esp)
13042 pushl 32(%esp)
13043 @@ -622,29 +703,43 @@ ENTRY(initial_code)
13044 /*
13045 * BSS section
13046 */
13047 -__PAGE_ALIGNED_BSS
13048 - .align PAGE_SIZE
13049 #ifdef CONFIG_X86_PAE
13050 +.section .initial_pg_pmd,"a",@progbits
13051 initial_pg_pmd:
13052 .fill 1024*KPMDS,4,0
13053 #else
13054 +.section .initial_page_table,"a",@progbits
13055 ENTRY(initial_page_table)
13056 .fill 1024,4,0
13057 #endif
13058 +.section .initial_pg_fixmap,"a",@progbits
13059 initial_pg_fixmap:
13060 .fill 1024,4,0
13061 +.section .empty_zero_page,"a",@progbits
13062 ENTRY(empty_zero_page)
13063 .fill 4096,1,0
13064 +.section .swapper_pg_dir,"a",@progbits
13065 ENTRY(swapper_pg_dir)
13066 +#ifdef CONFIG_X86_PAE
13067 + .fill 4,8,0
13068 +#else
13069 .fill 1024,4,0
13070 +#endif
13071 +
13072 +/*
13073 + * The IDT has to be page-aligned to simplify the Pentium
13074 + * F0 0F bug workaround.. We have a special link segment
13075 + * for this.
13076 + */
13077 +.section .idt,"a",@progbits
13078 +ENTRY(idt_table)
13079 + .fill 256,8,0
13080
13081 /*
13082 * This starts the data section.
13083 */
13084 #ifdef CONFIG_X86_PAE
13085 -__PAGE_ALIGNED_DATA
13086 - /* Page-aligned for the benefit of paravirt? */
13087 - .align PAGE_SIZE
13088 +.section .initial_page_table,"a",@progbits
13089 ENTRY(initial_page_table)
13090 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13091 # if KPMDS == 3
13092 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13093 # error "Kernel PMDs should be 1, 2 or 3"
13094 # endif
13095 .align PAGE_SIZE /* needs to be page-sized too */
13096 +
13097 +#ifdef CONFIG_PAX_PER_CPU_PGD
13098 +ENTRY(cpu_pgd)
13099 + .rept NR_CPUS
13100 + .fill 4,8,0
13101 + .endr
13102 +#endif
13103 +
13104 #endif
13105
13106 .data
13107 .balign 4
13108 ENTRY(stack_start)
13109 - .long init_thread_union+THREAD_SIZE
13110 + .long init_thread_union+THREAD_SIZE-8
13111 +
13112 +ready: .byte 0
13113
13114 +.section .rodata,"a",@progbits
13115 early_recursion_flag:
13116 .long 0
13117
13118 -ready: .byte 0
13119 -
13120 int_msg:
13121 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13122
13123 @@ -707,7 +811,7 @@ fault_msg:
13124 .word 0 # 32 bit align gdt_desc.address
13125 boot_gdt_descr:
13126 .word __BOOT_DS+7
13127 - .long boot_gdt - __PAGE_OFFSET
13128 + .long pa(boot_gdt)
13129
13130 .word 0 # 32-bit align idt_desc.address
13131 idt_descr:
13132 @@ -718,7 +822,7 @@ idt_descr:
13133 .word 0 # 32 bit align gdt_desc.address
13134 ENTRY(early_gdt_descr)
13135 .word GDT_ENTRIES*8-1
13136 - .long gdt_page /* Overwritten for secondary CPUs */
13137 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
13138
13139 /*
13140 * The boot_gdt must mirror the equivalent in setup.S and is
13141 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13142 .align L1_CACHE_BYTES
13143 ENTRY(boot_gdt)
13144 .fill GDT_ENTRY_BOOT_CS,8,0
13145 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13146 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13147 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13148 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13149 +
13150 + .align PAGE_SIZE_asm
13151 +ENTRY(cpu_gdt_table)
13152 + .rept NR_CPUS
13153 + .quad 0x0000000000000000 /* NULL descriptor */
13154 + .quad 0x0000000000000000 /* 0x0b reserved */
13155 + .quad 0x0000000000000000 /* 0x13 reserved */
13156 + .quad 0x0000000000000000 /* 0x1b reserved */
13157 +
13158 +#ifdef CONFIG_PAX_KERNEXEC
13159 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13160 +#else
13161 + .quad 0x0000000000000000 /* 0x20 unused */
13162 +#endif
13163 +
13164 + .quad 0x0000000000000000 /* 0x28 unused */
13165 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13166 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13167 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13168 + .quad 0x0000000000000000 /* 0x4b reserved */
13169 + .quad 0x0000000000000000 /* 0x53 reserved */
13170 + .quad 0x0000000000000000 /* 0x5b reserved */
13171 +
13172 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13173 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13174 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13175 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13176 +
13177 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13178 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13179 +
13180 + /*
13181 + * Segments used for calling PnP BIOS have byte granularity.
13182 + * The code segments and data segments have fixed 64k limits,
13183 + * the transfer segment sizes are set at run time.
13184 + */
13185 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
13186 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
13187 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
13188 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
13189 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
13190 +
13191 + /*
13192 + * The APM segments have byte granularity and their bases
13193 + * are set at run time. All have 64k limits.
13194 + */
13195 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13196 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13197 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
13198 +
13199 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13200 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13201 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13202 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13203 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13204 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13205 +
13206 + /* Be sure this is zeroed to avoid false validations in Xen */
13207 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13208 + .endr
13209 diff -urNp linux-3.0.7/arch/x86/kernel/head_64.S linux-3.0.7/arch/x86/kernel/head_64.S
13210 --- linux-3.0.7/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13211 +++ linux-3.0.7/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13212 @@ -19,6 +19,7 @@
13213 #include <asm/cache.h>
13214 #include <asm/processor-flags.h>
13215 #include <asm/percpu.h>
13216 +#include <asm/cpufeature.h>
13217
13218 #ifdef CONFIG_PARAVIRT
13219 #include <asm/asm-offsets.h>
13220 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13221 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13222 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13223 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13224 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
13225 +L3_VMALLOC_START = pud_index(VMALLOC_START)
13226 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13227 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13228
13229 .text
13230 __HEAD
13231 @@ -85,35 +90,22 @@ startup_64:
13232 */
13233 addq %rbp, init_level4_pgt + 0(%rip)
13234 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13235 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13236 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13237 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13238
13239 addq %rbp, level3_ident_pgt + 0(%rip)
13240 +#ifndef CONFIG_XEN
13241 + addq %rbp, level3_ident_pgt + 8(%rip)
13242 +#endif
13243
13244 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13245 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13246 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13247
13248 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13249 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13250 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13251
13252 - /* Add an Identity mapping if I am above 1G */
13253 - leaq _text(%rip), %rdi
13254 - andq $PMD_PAGE_MASK, %rdi
13255 -
13256 - movq %rdi, %rax
13257 - shrq $PUD_SHIFT, %rax
13258 - andq $(PTRS_PER_PUD - 1), %rax
13259 - jz ident_complete
13260 -
13261 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13262 - leaq level3_ident_pgt(%rip), %rbx
13263 - movq %rdx, 0(%rbx, %rax, 8)
13264 -
13265 - movq %rdi, %rax
13266 - shrq $PMD_SHIFT, %rax
13267 - andq $(PTRS_PER_PMD - 1), %rax
13268 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13269 - leaq level2_spare_pgt(%rip), %rbx
13270 - movq %rdx, 0(%rbx, %rax, 8)
13271 -ident_complete:
13272 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13273 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13274
13275 /*
13276 * Fixup the kernel text+data virtual addresses. Note that
13277 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13278 * after the boot processor executes this code.
13279 */
13280
13281 - /* Enable PAE mode and PGE */
13282 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13283 + /* Enable PAE mode and PSE/PGE */
13284 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13285 movq %rax, %cr4
13286
13287 /* Setup early boot stage 4 level pagetables. */
13288 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13289 movl $MSR_EFER, %ecx
13290 rdmsr
13291 btsl $_EFER_SCE, %eax /* Enable System Call */
13292 - btl $20,%edi /* No Execute supported? */
13293 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13294 jnc 1f
13295 btsl $_EFER_NX, %eax
13296 + leaq init_level4_pgt(%rip), %rdi
13297 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13298 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13299 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13300 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13301 1: wrmsr /* Make changes effective */
13302
13303 /* Setup cr0 */
13304 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13305 bad_address:
13306 jmp bad_address
13307
13308 - .section ".init.text","ax"
13309 + __INIT
13310 #ifdef CONFIG_EARLY_PRINTK
13311 .globl early_idt_handlers
13312 early_idt_handlers:
13313 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13314 #endif /* EARLY_PRINTK */
13315 1: hlt
13316 jmp 1b
13317 + .previous
13318
13319 #ifdef CONFIG_EARLY_PRINTK
13320 + __INITDATA
13321 early_recursion_flag:
13322 .long 0
13323 + .previous
13324
13325 + .section .rodata,"a",@progbits
13326 early_idt_msg:
13327 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13328 early_idt_ripmsg:
13329 .asciz "RIP %s\n"
13330 -#endif /* CONFIG_EARLY_PRINTK */
13331 .previous
13332 +#endif /* CONFIG_EARLY_PRINTK */
13333
13334 + .section .rodata,"a",@progbits
13335 #define NEXT_PAGE(name) \
13336 .balign PAGE_SIZE; \
13337 ENTRY(name)
13338 @@ -338,7 +340,6 @@ ENTRY(name)
13339 i = i + 1 ; \
13340 .endr
13341
13342 - .data
13343 /*
13344 * This default setting generates an ident mapping at address 0x100000
13345 * and a mapping for the kernel that precisely maps virtual address
13346 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13347 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13348 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13349 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13350 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13351 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13352 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13353 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13354 .org init_level4_pgt + L4_START_KERNEL*8, 0
13355 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13356 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13357
13358 +#ifdef CONFIG_PAX_PER_CPU_PGD
13359 +NEXT_PAGE(cpu_pgd)
13360 + .rept NR_CPUS
13361 + .fill 512,8,0
13362 + .endr
13363 +#endif
13364 +
13365 NEXT_PAGE(level3_ident_pgt)
13366 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13367 +#ifdef CONFIG_XEN
13368 .fill 511,8,0
13369 +#else
13370 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13371 + .fill 510,8,0
13372 +#endif
13373 +
13374 +NEXT_PAGE(level3_vmalloc_pgt)
13375 + .fill 512,8,0
13376 +
13377 +NEXT_PAGE(level3_vmemmap_pgt)
13378 + .fill L3_VMEMMAP_START,8,0
13379 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13380
13381 NEXT_PAGE(level3_kernel_pgt)
13382 .fill L3_START_KERNEL,8,0
13383 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13384 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13385 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13386
13387 +NEXT_PAGE(level2_vmemmap_pgt)
13388 + .fill 512,8,0
13389 +
13390 NEXT_PAGE(level2_fixmap_pgt)
13391 - .fill 506,8,0
13392 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13393 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13394 - .fill 5,8,0
13395 + .fill 507,8,0
13396 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13397 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13398 + .fill 4,8,0
13399
13400 -NEXT_PAGE(level1_fixmap_pgt)
13401 +NEXT_PAGE(level1_vsyscall_pgt)
13402 .fill 512,8,0
13403
13404 -NEXT_PAGE(level2_ident_pgt)
13405 - /* Since I easily can, map the first 1G.
13406 + /* Since I easily can, map the first 2G.
13407 * Don't set NX because code runs from these pages.
13408 */
13409 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13410 +NEXT_PAGE(level2_ident_pgt)
13411 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13412
13413 NEXT_PAGE(level2_kernel_pgt)
13414 /*
13415 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13416 * If you want to increase this then increase MODULES_VADDR
13417 * too.)
13418 */
13419 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13420 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13421 -
13422 -NEXT_PAGE(level2_spare_pgt)
13423 - .fill 512, 8, 0
13424 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13425
13426 #undef PMDS
13427 #undef NEXT_PAGE
13428
13429 - .data
13430 + .align PAGE_SIZE
13431 +ENTRY(cpu_gdt_table)
13432 + .rept NR_CPUS
13433 + .quad 0x0000000000000000 /* NULL descriptor */
13434 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13435 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13436 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13437 + .quad 0x00cffb000000ffff /* __USER32_CS */
13438 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13439 + .quad 0x00affb000000ffff /* __USER_CS */
13440 +
13441 +#ifdef CONFIG_PAX_KERNEXEC
13442 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13443 +#else
13444 + .quad 0x0 /* unused */
13445 +#endif
13446 +
13447 + .quad 0,0 /* TSS */
13448 + .quad 0,0 /* LDT */
13449 + .quad 0,0,0 /* three TLS descriptors */
13450 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13451 + /* asm/segment.h:GDT_ENTRIES must match this */
13452 +
13453 + /* zero the remaining page */
13454 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13455 + .endr
13456 +
13457 .align 16
13458 .globl early_gdt_descr
13459 early_gdt_descr:
13460 .word GDT_ENTRIES*8-1
13461 early_gdt_descr_base:
13462 - .quad INIT_PER_CPU_VAR(gdt_page)
13463 + .quad cpu_gdt_table
13464
13465 ENTRY(phys_base)
13466 /* This must match the first entry in level2_kernel_pgt */
13467 .quad 0x0000000000000000
13468
13469 #include "../../x86/xen/xen-head.S"
13470 -
13471 - .section .bss, "aw", @nobits
13472 +
13473 + .section .rodata,"a",@progbits
13474 .align L1_CACHE_BYTES
13475 ENTRY(idt_table)
13476 - .skip IDT_ENTRIES * 16
13477 + .fill 512,8,0
13478
13479 __PAGE_ALIGNED_BSS
13480 .align PAGE_SIZE
13481 diff -urNp linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c
13482 --- linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13483 +++ linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13484 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13485 EXPORT_SYMBOL(cmpxchg8b_emu);
13486 #endif
13487
13488 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13489 +
13490 /* Networking helper routines. */
13491 EXPORT_SYMBOL(csum_partial_copy_generic);
13492 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13493 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13494
13495 EXPORT_SYMBOL(__get_user_1);
13496 EXPORT_SYMBOL(__get_user_2);
13497 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13498
13499 EXPORT_SYMBOL(csum_partial);
13500 EXPORT_SYMBOL(empty_zero_page);
13501 +
13502 +#ifdef CONFIG_PAX_KERNEXEC
13503 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13504 +#endif
13505 diff -urNp linux-3.0.7/arch/x86/kernel/i8259.c linux-3.0.7/arch/x86/kernel/i8259.c
13506 --- linux-3.0.7/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13507 +++ linux-3.0.7/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13508 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13509 "spurious 8259A interrupt: IRQ%d.\n", irq);
13510 spurious_irq_mask |= irqmask;
13511 }
13512 - atomic_inc(&irq_err_count);
13513 + atomic_inc_unchecked(&irq_err_count);
13514 /*
13515 * Theoretically we do not have to handle this IRQ,
13516 * but in Linux this does not cause problems and is
13517 diff -urNp linux-3.0.7/arch/x86/kernel/init_task.c linux-3.0.7/arch/x86/kernel/init_task.c
13518 --- linux-3.0.7/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13519 +++ linux-3.0.7/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13520 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13521 * way process stacks are handled. This is done by having a special
13522 * "init_task" linker map entry..
13523 */
13524 -union thread_union init_thread_union __init_task_data =
13525 - { INIT_THREAD_INFO(init_task) };
13526 +union thread_union init_thread_union __init_task_data;
13527
13528 /*
13529 * Initial task structure.
13530 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13531 * section. Since TSS's are completely CPU-local, we want them
13532 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13533 */
13534 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13535 -
13536 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13537 +EXPORT_SYMBOL(init_tss);
13538 diff -urNp linux-3.0.7/arch/x86/kernel/ioport.c linux-3.0.7/arch/x86/kernel/ioport.c
13539 --- linux-3.0.7/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13540 +++ linux-3.0.7/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13541 @@ -6,6 +6,7 @@
13542 #include <linux/sched.h>
13543 #include <linux/kernel.h>
13544 #include <linux/capability.h>
13545 +#include <linux/security.h>
13546 #include <linux/errno.h>
13547 #include <linux/types.h>
13548 #include <linux/ioport.h>
13549 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13550
13551 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13552 return -EINVAL;
13553 +#ifdef CONFIG_GRKERNSEC_IO
13554 + if (turn_on && grsec_disable_privio) {
13555 + gr_handle_ioperm();
13556 + return -EPERM;
13557 + }
13558 +#endif
13559 if (turn_on && !capable(CAP_SYS_RAWIO))
13560 return -EPERM;
13561
13562 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13563 * because the ->io_bitmap_max value must match the bitmap
13564 * contents:
13565 */
13566 - tss = &per_cpu(init_tss, get_cpu());
13567 + tss = init_tss + get_cpu();
13568
13569 if (turn_on)
13570 bitmap_clear(t->io_bitmap_ptr, from, num);
13571 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13572 return -EINVAL;
13573 /* Trying to gain more privileges? */
13574 if (level > old) {
13575 +#ifdef CONFIG_GRKERNSEC_IO
13576 + if (grsec_disable_privio) {
13577 + gr_handle_iopl();
13578 + return -EPERM;
13579 + }
13580 +#endif
13581 if (!capable(CAP_SYS_RAWIO))
13582 return -EPERM;
13583 }
13584 diff -urNp linux-3.0.7/arch/x86/kernel/irq_32.c linux-3.0.7/arch/x86/kernel/irq_32.c
13585 --- linux-3.0.7/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13586 +++ linux-3.0.7/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13587 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13588 __asm__ __volatile__("andl %%esp,%0" :
13589 "=r" (sp) : "0" (THREAD_SIZE - 1));
13590
13591 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13592 + return sp < STACK_WARN;
13593 }
13594
13595 static void print_stack_overflow(void)
13596 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13597 * per-CPU IRQ handling contexts (thread information and stack)
13598 */
13599 union irq_ctx {
13600 - struct thread_info tinfo;
13601 - u32 stack[THREAD_SIZE/sizeof(u32)];
13602 + unsigned long previous_esp;
13603 + u32 stack[THREAD_SIZE/sizeof(u32)];
13604 } __attribute__((aligned(THREAD_SIZE)));
13605
13606 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13607 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13608 static inline int
13609 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13610 {
13611 - union irq_ctx *curctx, *irqctx;
13612 + union irq_ctx *irqctx;
13613 u32 *isp, arg1, arg2;
13614
13615 - curctx = (union irq_ctx *) current_thread_info();
13616 irqctx = __this_cpu_read(hardirq_ctx);
13617
13618 /*
13619 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13620 * handler) we can't do that and just have to keep using the
13621 * current stack (which is the irq stack already after all)
13622 */
13623 - if (unlikely(curctx == irqctx))
13624 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13625 return 0;
13626
13627 /* build the stack frame on the IRQ stack */
13628 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13629 - irqctx->tinfo.task = curctx->tinfo.task;
13630 - irqctx->tinfo.previous_esp = current_stack_pointer;
13631 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13632 + irqctx->previous_esp = current_stack_pointer;
13633
13634 - /*
13635 - * Copy the softirq bits in preempt_count so that the
13636 - * softirq checks work in the hardirq context.
13637 - */
13638 - irqctx->tinfo.preempt_count =
13639 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13640 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13641 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13642 + __set_fs(MAKE_MM_SEG(0));
13643 +#endif
13644
13645 if (unlikely(overflow))
13646 call_on_stack(print_stack_overflow, isp);
13647 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13648 : "0" (irq), "1" (desc), "2" (isp),
13649 "D" (desc->handle_irq)
13650 : "memory", "cc", "ecx");
13651 +
13652 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13653 + __set_fs(current_thread_info()->addr_limit);
13654 +#endif
13655 +
13656 return 1;
13657 }
13658
13659 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13660 */
13661 void __cpuinit irq_ctx_init(int cpu)
13662 {
13663 - union irq_ctx *irqctx;
13664 -
13665 if (per_cpu(hardirq_ctx, cpu))
13666 return;
13667
13668 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13669 - THREAD_FLAGS,
13670 - THREAD_ORDER));
13671 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13672 - irqctx->tinfo.cpu = cpu;
13673 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13674 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13675 -
13676 - per_cpu(hardirq_ctx, cpu) = irqctx;
13677 -
13678 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13679 - THREAD_FLAGS,
13680 - THREAD_ORDER));
13681 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13682 - irqctx->tinfo.cpu = cpu;
13683 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13684 -
13685 - per_cpu(softirq_ctx, cpu) = irqctx;
13686 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13687 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13688
13689 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13690 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13691 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13692 asmlinkage void do_softirq(void)
13693 {
13694 unsigned long flags;
13695 - struct thread_info *curctx;
13696 union irq_ctx *irqctx;
13697 u32 *isp;
13698
13699 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13700 local_irq_save(flags);
13701
13702 if (local_softirq_pending()) {
13703 - curctx = current_thread_info();
13704 irqctx = __this_cpu_read(softirq_ctx);
13705 - irqctx->tinfo.task = curctx->task;
13706 - irqctx->tinfo.previous_esp = current_stack_pointer;
13707 + irqctx->previous_esp = current_stack_pointer;
13708
13709 /* build the stack frame on the softirq stack */
13710 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13711 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13712 +
13713 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13714 + __set_fs(MAKE_MM_SEG(0));
13715 +#endif
13716
13717 call_on_stack(__do_softirq, isp);
13718 +
13719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13720 + __set_fs(current_thread_info()->addr_limit);
13721 +#endif
13722 +
13723 /*
13724 * Shouldn't happen, we returned above if in_interrupt():
13725 */
13726 diff -urNp linux-3.0.7/arch/x86/kernel/irq.c linux-3.0.7/arch/x86/kernel/irq.c
13727 --- linux-3.0.7/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13728 +++ linux-3.0.7/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13729 @@ -17,7 +17,7 @@
13730 #include <asm/mce.h>
13731 #include <asm/hw_irq.h>
13732
13733 -atomic_t irq_err_count;
13734 +atomic_unchecked_t irq_err_count;
13735
13736 /* Function pointer for generic interrupt vector handling */
13737 void (*x86_platform_ipi_callback)(void) = NULL;
13738 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13739 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13740 seq_printf(p, " Machine check polls\n");
13741 #endif
13742 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13743 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13744 #if defined(CONFIG_X86_IO_APIC)
13745 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13746 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13747 #endif
13748 return 0;
13749 }
13750 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13751
13752 u64 arch_irq_stat(void)
13753 {
13754 - u64 sum = atomic_read(&irq_err_count);
13755 + u64 sum = atomic_read_unchecked(&irq_err_count);
13756
13757 #ifdef CONFIG_X86_IO_APIC
13758 - sum += atomic_read(&irq_mis_count);
13759 + sum += atomic_read_unchecked(&irq_mis_count);
13760 #endif
13761 return sum;
13762 }
13763 diff -urNp linux-3.0.7/arch/x86/kernel/kgdb.c linux-3.0.7/arch/x86/kernel/kgdb.c
13764 --- linux-3.0.7/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13765 +++ linux-3.0.7/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13766 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13767 #ifdef CONFIG_X86_32
13768 switch (regno) {
13769 case GDB_SS:
13770 - if (!user_mode_vm(regs))
13771 + if (!user_mode(regs))
13772 *(unsigned long *)mem = __KERNEL_DS;
13773 break;
13774 case GDB_SP:
13775 - if (!user_mode_vm(regs))
13776 + if (!user_mode(regs))
13777 *(unsigned long *)mem = kernel_stack_pointer(regs);
13778 break;
13779 case GDB_GS:
13780 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13781 case 'k':
13782 /* clear the trace bit */
13783 linux_regs->flags &= ~X86_EFLAGS_TF;
13784 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13785 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13786
13787 /* set the trace bit if we're stepping */
13788 if (remcomInBuffer[0] == 's') {
13789 linux_regs->flags |= X86_EFLAGS_TF;
13790 - atomic_set(&kgdb_cpu_doing_single_step,
13791 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13792 raw_smp_processor_id());
13793 }
13794
13795 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13796 return NOTIFY_DONE;
13797
13798 case DIE_DEBUG:
13799 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13800 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13801 if (user_mode(regs))
13802 return single_step_cont(regs, args);
13803 break;
13804 diff -urNp linux-3.0.7/arch/x86/kernel/kprobes.c linux-3.0.7/arch/x86/kernel/kprobes.c
13805 --- linux-3.0.7/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13806 +++ linux-3.0.7/arch/x86/kernel/kprobes.c 2011-10-11 10:44:33.000000000 -0400
13807 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13808 } __attribute__((packed)) *insn;
13809
13810 insn = (struct __arch_relative_insn *)from;
13811 +
13812 + pax_open_kernel();
13813 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13814 insn->op = op;
13815 + pax_close_kernel();
13816 }
13817
13818 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13819 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13820 kprobe_opcode_t opcode;
13821 kprobe_opcode_t *orig_opcodes = opcodes;
13822
13823 - if (search_exception_tables((unsigned long)opcodes))
13824 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13825 return 0; /* Page fault may occur on this address. */
13826
13827 retry:
13828 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13829 }
13830 }
13831 insn_get_length(&insn);
13832 + pax_open_kernel();
13833 memcpy(dest, insn.kaddr, insn.length);
13834 + pax_close_kernel();
13835
13836 #ifdef CONFIG_X86_64
13837 if (insn_rip_relative(&insn)) {
13838 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13839 (u8 *) dest;
13840 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13841 disp = (u8 *) dest + insn_offset_displacement(&insn);
13842 + pax_open_kernel();
13843 *(s32 *) disp = (s32) newdisp;
13844 + pax_close_kernel();
13845 }
13846 #endif
13847 return insn.length;
13848 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13849 */
13850 __copy_instruction(p->ainsn.insn, p->addr, 0);
13851
13852 - if (can_boost(p->addr))
13853 + if (can_boost(ktla_ktva(p->addr)))
13854 p->ainsn.boostable = 0;
13855 else
13856 p->ainsn.boostable = -1;
13857
13858 - p->opcode = *p->addr;
13859 + p->opcode = *(ktla_ktva(p->addr));
13860 }
13861
13862 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13863 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13864 * nor set current_kprobe, because it doesn't use single
13865 * stepping.
13866 */
13867 - regs->ip = (unsigned long)p->ainsn.insn;
13868 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13869 preempt_enable_no_resched();
13870 return;
13871 }
13872 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13873 if (p->opcode == BREAKPOINT_INSTRUCTION)
13874 regs->ip = (unsigned long)p->addr;
13875 else
13876 - regs->ip = (unsigned long)p->ainsn.insn;
13877 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13878 }
13879
13880 /*
13881 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13882 setup_singlestep(p, regs, kcb, 0);
13883 return 1;
13884 }
13885 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13886 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13887 /*
13888 * The breakpoint instruction was removed right
13889 * after we hit it. Another cpu has removed
13890 @@ -680,6 +687,9 @@ static void __used __kprobes kretprobe_t
13891 " movq %rax, 152(%rsp)\n"
13892 RESTORE_REGS_STRING
13893 " popfq\n"
13894 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
13895 + " btsq $63,(%rsp)\n"
13896 +#endif
13897 #else
13898 " pushf\n"
13899 SAVE_REGS_STRING
13900 @@ -817,7 +827,7 @@ static void __kprobes resume_execution(s
13901 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13902 {
13903 unsigned long *tos = stack_addr(regs);
13904 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13905 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13906 unsigned long orig_ip = (unsigned long)p->addr;
13907 kprobe_opcode_t *insn = p->ainsn.insn;
13908
13909 @@ -999,7 +1009,7 @@ int __kprobes kprobe_exceptions_notify(s
13910 struct die_args *args = data;
13911 int ret = NOTIFY_DONE;
13912
13913 - if (args->regs && user_mode_vm(args->regs))
13914 + if (args->regs && user_mode(args->regs))
13915 return ret;
13916
13917 switch (val) {
13918 @@ -1381,7 +1391,7 @@ int __kprobes arch_prepare_optimized_kpr
13919 * Verify if the address gap is in 2GB range, because this uses
13920 * a relative jump.
13921 */
13922 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13923 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13924 if (abs(rel) > 0x7fffffff)
13925 return -ERANGE;
13926
13927 @@ -1402,11 +1412,11 @@ int __kprobes arch_prepare_optimized_kpr
13928 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13929
13930 /* Set probe function call */
13931 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13932 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13933
13934 /* Set returning jmp instruction at the tail of out-of-line buffer */
13935 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13936 - (u8 *)op->kp.addr + op->optinsn.size);
13937 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13938
13939 flush_icache_range((unsigned long) buf,
13940 (unsigned long) buf + TMPL_END_IDX +
13941 @@ -1428,7 +1438,7 @@ static void __kprobes setup_optimize_kpr
13942 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13943
13944 /* Backup instructions which will be replaced by jump address */
13945 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13946 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13947 RELATIVE_ADDR_SIZE);
13948
13949 insn_buf[0] = RELATIVEJUMP_OPCODE;
13950 diff -urNp linux-3.0.7/arch/x86/kernel/kvm.c linux-3.0.7/arch/x86/kernel/kvm.c
13951 --- linux-3.0.7/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13952 +++ linux-3.0.7/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13953 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13954 pv_mmu_ops.set_pud = kvm_set_pud;
13955 #if PAGETABLE_LEVELS == 4
13956 pv_mmu_ops.set_pgd = kvm_set_pgd;
13957 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13958 #endif
13959 #endif
13960 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13961 diff -urNp linux-3.0.7/arch/x86/kernel/ldt.c linux-3.0.7/arch/x86/kernel/ldt.c
13962 --- linux-3.0.7/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13963 +++ linux-3.0.7/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13964 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13965 if (reload) {
13966 #ifdef CONFIG_SMP
13967 preempt_disable();
13968 - load_LDT(pc);
13969 + load_LDT_nolock(pc);
13970 if (!cpumask_equal(mm_cpumask(current->mm),
13971 cpumask_of(smp_processor_id())))
13972 smp_call_function(flush_ldt, current->mm, 1);
13973 preempt_enable();
13974 #else
13975 - load_LDT(pc);
13976 + load_LDT_nolock(pc);
13977 #endif
13978 }
13979 if (oldsize) {
13980 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13981 return err;
13982
13983 for (i = 0; i < old->size; i++)
13984 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13985 + write_ldt_entry(new->ldt, i, old->ldt + i);
13986 return 0;
13987 }
13988
13989 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13990 retval = copy_ldt(&mm->context, &old_mm->context);
13991 mutex_unlock(&old_mm->context.lock);
13992 }
13993 +
13994 + if (tsk == current) {
13995 + mm->context.vdso = 0;
13996 +
13997 +#ifdef CONFIG_X86_32
13998 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13999 + mm->context.user_cs_base = 0UL;
14000 + mm->context.user_cs_limit = ~0UL;
14001 +
14002 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14003 + cpus_clear(mm->context.cpu_user_cs_mask);
14004 +#endif
14005 +
14006 +#endif
14007 +#endif
14008 +
14009 + }
14010 +
14011 return retval;
14012 }
14013
14014 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14015 }
14016 }
14017
14018 +#ifdef CONFIG_PAX_SEGMEXEC
14019 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14020 + error = -EINVAL;
14021 + goto out_unlock;
14022 + }
14023 +#endif
14024 +
14025 fill_ldt(&ldt, &ldt_info);
14026 if (oldmode)
14027 ldt.avl = 0;
14028 diff -urNp linux-3.0.7/arch/x86/kernel/machine_kexec_32.c linux-3.0.7/arch/x86/kernel/machine_kexec_32.c
14029 --- linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
14030 +++ linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
14031 @@ -27,7 +27,7 @@
14032 #include <asm/cacheflush.h>
14033 #include <asm/debugreg.h>
14034
14035 -static void set_idt(void *newidt, __u16 limit)
14036 +static void set_idt(struct desc_struct *newidt, __u16 limit)
14037 {
14038 struct desc_ptr curidt;
14039
14040 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14041 }
14042
14043
14044 -static void set_gdt(void *newgdt, __u16 limit)
14045 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14046 {
14047 struct desc_ptr curgdt;
14048
14049 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14050 }
14051
14052 control_page = page_address(image->control_code_page);
14053 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14054 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14055
14056 relocate_kernel_ptr = control_page;
14057 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14058 diff -urNp linux-3.0.7/arch/x86/kernel/microcode_intel.c linux-3.0.7/arch/x86/kernel/microcode_intel.c
14059 --- linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14060 +++ linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14061 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14062
14063 static int get_ucode_user(void *to, const void *from, size_t n)
14064 {
14065 - return copy_from_user(to, from, n);
14066 + return copy_from_user(to, (const void __force_user *)from, n);
14067 }
14068
14069 static enum ucode_state
14070 request_microcode_user(int cpu, const void __user *buf, size_t size)
14071 {
14072 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14073 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14074 }
14075
14076 static void microcode_fini_cpu(int cpu)
14077 diff -urNp linux-3.0.7/arch/x86/kernel/module.c linux-3.0.7/arch/x86/kernel/module.c
14078 --- linux-3.0.7/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14079 +++ linux-3.0.7/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14080 @@ -36,21 +36,66 @@
14081 #define DEBUGP(fmt...)
14082 #endif
14083
14084 -void *module_alloc(unsigned long size)
14085 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14086 {
14087 if (PAGE_ALIGN(size) > MODULES_LEN)
14088 return NULL;
14089 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14090 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14091 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14092 -1, __builtin_return_address(0));
14093 }
14094
14095 +void *module_alloc(unsigned long size)
14096 +{
14097 +
14098 +#ifdef CONFIG_PAX_KERNEXEC
14099 + return __module_alloc(size, PAGE_KERNEL);
14100 +#else
14101 + return __module_alloc(size, PAGE_KERNEL_EXEC);
14102 +#endif
14103 +
14104 +}
14105 +
14106 /* Free memory returned from module_alloc */
14107 void module_free(struct module *mod, void *module_region)
14108 {
14109 vfree(module_region);
14110 }
14111
14112 +#ifdef CONFIG_PAX_KERNEXEC
14113 +#ifdef CONFIG_X86_32
14114 +void *module_alloc_exec(unsigned long size)
14115 +{
14116 + struct vm_struct *area;
14117 +
14118 + if (size == 0)
14119 + return NULL;
14120 +
14121 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14122 + return area ? area->addr : NULL;
14123 +}
14124 +EXPORT_SYMBOL(module_alloc_exec);
14125 +
14126 +void module_free_exec(struct module *mod, void *module_region)
14127 +{
14128 + vunmap(module_region);
14129 +}
14130 +EXPORT_SYMBOL(module_free_exec);
14131 +#else
14132 +void module_free_exec(struct module *mod, void *module_region)
14133 +{
14134 + module_free(mod, module_region);
14135 +}
14136 +EXPORT_SYMBOL(module_free_exec);
14137 +
14138 +void *module_alloc_exec(unsigned long size)
14139 +{
14140 + return __module_alloc(size, PAGE_KERNEL_RX);
14141 +}
14142 +EXPORT_SYMBOL(module_alloc_exec);
14143 +#endif
14144 +#endif
14145 +
14146 /* We don't need anything special. */
14147 int module_frob_arch_sections(Elf_Ehdr *hdr,
14148 Elf_Shdr *sechdrs,
14149 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14150 unsigned int i;
14151 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14152 Elf32_Sym *sym;
14153 - uint32_t *location;
14154 + uint32_t *plocation, location;
14155
14156 DEBUGP("Applying relocate section %u to %u\n", relsec,
14157 sechdrs[relsec].sh_info);
14158 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14159 /* This is where to make the change */
14160 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14161 - + rel[i].r_offset;
14162 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14163 + location = (uint32_t)plocation;
14164 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14165 + plocation = ktla_ktva((void *)plocation);
14166 /* This is the symbol it is referring to. Note that all
14167 undefined symbols have been resolved. */
14168 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14169 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14170 switch (ELF32_R_TYPE(rel[i].r_info)) {
14171 case R_386_32:
14172 /* We add the value into the location given */
14173 - *location += sym->st_value;
14174 + pax_open_kernel();
14175 + *plocation += sym->st_value;
14176 + pax_close_kernel();
14177 break;
14178 case R_386_PC32:
14179 /* Add the value, subtract its postition */
14180 - *location += sym->st_value - (uint32_t)location;
14181 + pax_open_kernel();
14182 + *plocation += sym->st_value - location;
14183 + pax_close_kernel();
14184 break;
14185 default:
14186 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14187 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14188 case R_X86_64_NONE:
14189 break;
14190 case R_X86_64_64:
14191 + pax_open_kernel();
14192 *(u64 *)loc = val;
14193 + pax_close_kernel();
14194 break;
14195 case R_X86_64_32:
14196 + pax_open_kernel();
14197 *(u32 *)loc = val;
14198 + pax_close_kernel();
14199 if (val != *(u32 *)loc)
14200 goto overflow;
14201 break;
14202 case R_X86_64_32S:
14203 + pax_open_kernel();
14204 *(s32 *)loc = val;
14205 + pax_close_kernel();
14206 if ((s64)val != *(s32 *)loc)
14207 goto overflow;
14208 break;
14209 case R_X86_64_PC32:
14210 val -= (u64)loc;
14211 + pax_open_kernel();
14212 *(u32 *)loc = val;
14213 + pax_close_kernel();
14214 +
14215 #if 0
14216 if ((s64)val != *(s32 *)loc)
14217 goto overflow;
14218 diff -urNp linux-3.0.7/arch/x86/kernel/paravirt.c linux-3.0.7/arch/x86/kernel/paravirt.c
14219 --- linux-3.0.7/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14220 +++ linux-3.0.7/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14221 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14222 {
14223 return x;
14224 }
14225 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14226 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14227 +#endif
14228
14229 void __init default_banner(void)
14230 {
14231 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14232 * corresponding structure. */
14233 static void *get_call_destination(u8 type)
14234 {
14235 - struct paravirt_patch_template tmpl = {
14236 + const struct paravirt_patch_template tmpl = {
14237 .pv_init_ops = pv_init_ops,
14238 .pv_time_ops = pv_time_ops,
14239 .pv_cpu_ops = pv_cpu_ops,
14240 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14241 .pv_lock_ops = pv_lock_ops,
14242 #endif
14243 };
14244 +
14245 + pax_track_stack();
14246 +
14247 return *((void **)&tmpl + type);
14248 }
14249
14250 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14251 if (opfunc == NULL)
14252 /* If there's no function, patch it with a ud2a (BUG) */
14253 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14254 - else if (opfunc == _paravirt_nop)
14255 + else if (opfunc == (void *)_paravirt_nop)
14256 /* If the operation is a nop, then nop the callsite */
14257 ret = paravirt_patch_nop();
14258
14259 /* identity functions just return their single argument */
14260 - else if (opfunc == _paravirt_ident_32)
14261 + else if (opfunc == (void *)_paravirt_ident_32)
14262 ret = paravirt_patch_ident_32(insnbuf, len);
14263 - else if (opfunc == _paravirt_ident_64)
14264 + else if (opfunc == (void *)_paravirt_ident_64)
14265 ret = paravirt_patch_ident_64(insnbuf, len);
14266 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14267 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14268 + ret = paravirt_patch_ident_64(insnbuf, len);
14269 +#endif
14270
14271 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14272 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14273 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14274 if (insn_len > len || start == NULL)
14275 insn_len = len;
14276 else
14277 - memcpy(insnbuf, start, insn_len);
14278 + memcpy(insnbuf, ktla_ktva(start), insn_len);
14279
14280 return insn_len;
14281 }
14282 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14283 preempt_enable();
14284 }
14285
14286 -struct pv_info pv_info = {
14287 +struct pv_info pv_info __read_only = {
14288 .name = "bare hardware",
14289 .paravirt_enabled = 0,
14290 .kernel_rpl = 0,
14291 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14292 };
14293
14294 -struct pv_init_ops pv_init_ops = {
14295 +struct pv_init_ops pv_init_ops __read_only = {
14296 .patch = native_patch,
14297 };
14298
14299 -struct pv_time_ops pv_time_ops = {
14300 +struct pv_time_ops pv_time_ops __read_only = {
14301 .sched_clock = native_sched_clock,
14302 };
14303
14304 -struct pv_irq_ops pv_irq_ops = {
14305 +struct pv_irq_ops pv_irq_ops __read_only = {
14306 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14307 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14308 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14309 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14310 #endif
14311 };
14312
14313 -struct pv_cpu_ops pv_cpu_ops = {
14314 +struct pv_cpu_ops pv_cpu_ops __read_only = {
14315 .cpuid = native_cpuid,
14316 .get_debugreg = native_get_debugreg,
14317 .set_debugreg = native_set_debugreg,
14318 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14319 .end_context_switch = paravirt_nop,
14320 };
14321
14322 -struct pv_apic_ops pv_apic_ops = {
14323 +struct pv_apic_ops pv_apic_ops __read_only = {
14324 #ifdef CONFIG_X86_LOCAL_APIC
14325 .startup_ipi_hook = paravirt_nop,
14326 #endif
14327 };
14328
14329 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14330 +#ifdef CONFIG_X86_32
14331 +#ifdef CONFIG_X86_PAE
14332 +/* 64-bit pagetable entries */
14333 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14334 +#else
14335 /* 32-bit pagetable entries */
14336 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14337 +#endif
14338 #else
14339 /* 64-bit pagetable entries */
14340 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14341 #endif
14342
14343 -struct pv_mmu_ops pv_mmu_ops = {
14344 +struct pv_mmu_ops pv_mmu_ops __read_only = {
14345
14346 .read_cr2 = native_read_cr2,
14347 .write_cr2 = native_write_cr2,
14348 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14349 .make_pud = PTE_IDENT,
14350
14351 .set_pgd = native_set_pgd,
14352 + .set_pgd_batched = native_set_pgd_batched,
14353 #endif
14354 #endif /* PAGETABLE_LEVELS >= 3 */
14355
14356 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14357 },
14358
14359 .set_fixmap = native_set_fixmap,
14360 +
14361 +#ifdef CONFIG_PAX_KERNEXEC
14362 + .pax_open_kernel = native_pax_open_kernel,
14363 + .pax_close_kernel = native_pax_close_kernel,
14364 +#endif
14365 +
14366 };
14367
14368 EXPORT_SYMBOL_GPL(pv_time_ops);
14369 diff -urNp linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c
14370 --- linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14371 +++ linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14372 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14373 arch_spin_lock(lock);
14374 }
14375
14376 -struct pv_lock_ops pv_lock_ops = {
14377 +struct pv_lock_ops pv_lock_ops __read_only = {
14378 #ifdef CONFIG_SMP
14379 .spin_is_locked = __ticket_spin_is_locked,
14380 .spin_is_contended = __ticket_spin_is_contended,
14381 diff -urNp linux-3.0.7/arch/x86/kernel/pci-iommu_table.c linux-3.0.7/arch/x86/kernel/pci-iommu_table.c
14382 --- linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14383 +++ linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14384 @@ -2,7 +2,7 @@
14385 #include <asm/iommu_table.h>
14386 #include <linux/string.h>
14387 #include <linux/kallsyms.h>
14388 -
14389 +#include <linux/sched.h>
14390
14391 #define DEBUG 1
14392
14393 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14394 {
14395 struct iommu_table_entry *p, *q, *x;
14396
14397 + pax_track_stack();
14398 +
14399 /* Simple cyclic dependency checker. */
14400 for (p = start; p < finish; p++) {
14401 q = find_dependents_of(start, finish, p);
14402 diff -urNp linux-3.0.7/arch/x86/kernel/process_32.c linux-3.0.7/arch/x86/kernel/process_32.c
14403 --- linux-3.0.7/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14404 +++ linux-3.0.7/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14405 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14406 unsigned long thread_saved_pc(struct task_struct *tsk)
14407 {
14408 return ((unsigned long *)tsk->thread.sp)[3];
14409 +//XXX return tsk->thread.eip;
14410 }
14411
14412 #ifndef CONFIG_SMP
14413 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14414 unsigned long sp;
14415 unsigned short ss, gs;
14416
14417 - if (user_mode_vm(regs)) {
14418 + if (user_mode(regs)) {
14419 sp = regs->sp;
14420 ss = regs->ss & 0xffff;
14421 - gs = get_user_gs(regs);
14422 } else {
14423 sp = kernel_stack_pointer(regs);
14424 savesegment(ss, ss);
14425 - savesegment(gs, gs);
14426 }
14427 + gs = get_user_gs(regs);
14428
14429 show_regs_common();
14430
14431 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14432 struct task_struct *tsk;
14433 int err;
14434
14435 - childregs = task_pt_regs(p);
14436 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14437 *childregs = *regs;
14438 childregs->ax = 0;
14439 childregs->sp = sp;
14440
14441 p->thread.sp = (unsigned long) childregs;
14442 p->thread.sp0 = (unsigned long) (childregs+1);
14443 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14444
14445 p->thread.ip = (unsigned long) ret_from_fork;
14446
14447 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14448 struct thread_struct *prev = &prev_p->thread,
14449 *next = &next_p->thread;
14450 int cpu = smp_processor_id();
14451 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14452 + struct tss_struct *tss = init_tss + cpu;
14453 bool preload_fpu;
14454
14455 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14456 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14457 */
14458 lazy_save_gs(prev->gs);
14459
14460 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14461 + __set_fs(task_thread_info(next_p)->addr_limit);
14462 +#endif
14463 +
14464 /*
14465 * Load the per-thread Thread-Local Storage descriptor.
14466 */
14467 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14468 */
14469 arch_end_context_switch(next_p);
14470
14471 + percpu_write(current_task, next_p);
14472 + percpu_write(current_tinfo, &next_p->tinfo);
14473 +
14474 if (preload_fpu)
14475 __math_state_restore();
14476
14477 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14478 if (prev->gs | next->gs)
14479 lazy_load_gs(next->gs);
14480
14481 - percpu_write(current_task, next_p);
14482 -
14483 return prev_p;
14484 }
14485
14486 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14487 } while (count++ < 16);
14488 return 0;
14489 }
14490 -
14491 diff -urNp linux-3.0.7/arch/x86/kernel/process_64.c linux-3.0.7/arch/x86/kernel/process_64.c
14492 --- linux-3.0.7/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14493 +++ linux-3.0.7/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14494 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14495 void exit_idle(void)
14496 {
14497 /* idle loop has pid 0 */
14498 - if (current->pid)
14499 + if (task_pid_nr(current))
14500 return;
14501 __exit_idle();
14502 }
14503 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14504 struct pt_regs *childregs;
14505 struct task_struct *me = current;
14506
14507 - childregs = ((struct pt_regs *)
14508 - (THREAD_SIZE + task_stack_page(p))) - 1;
14509 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14510 *childregs = *regs;
14511
14512 childregs->ax = 0;
14513 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14514 p->thread.sp = (unsigned long) childregs;
14515 p->thread.sp0 = (unsigned long) (childregs+1);
14516 p->thread.usersp = me->thread.usersp;
14517 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14518
14519 set_tsk_thread_flag(p, TIF_FORK);
14520
14521 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14522 struct thread_struct *prev = &prev_p->thread;
14523 struct thread_struct *next = &next_p->thread;
14524 int cpu = smp_processor_id();
14525 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14526 + struct tss_struct *tss = init_tss + cpu;
14527 unsigned fsindex, gsindex;
14528 bool preload_fpu;
14529
14530 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14531 prev->usersp = percpu_read(old_rsp);
14532 percpu_write(old_rsp, next->usersp);
14533 percpu_write(current_task, next_p);
14534 + percpu_write(current_tinfo, &next_p->tinfo);
14535
14536 - percpu_write(kernel_stack,
14537 - (unsigned long)task_stack_page(next_p) +
14538 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14539 + percpu_write(kernel_stack, next->sp0);
14540
14541 /*
14542 * Now maybe reload the debug registers and handle I/O bitmaps
14543 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14544 if (!p || p == current || p->state == TASK_RUNNING)
14545 return 0;
14546 stack = (unsigned long)task_stack_page(p);
14547 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14548 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14549 return 0;
14550 fp = *(u64 *)(p->thread.sp);
14551 do {
14552 - if (fp < (unsigned long)stack ||
14553 - fp >= (unsigned long)stack+THREAD_SIZE)
14554 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14555 return 0;
14556 ip = *(u64 *)(fp+8);
14557 if (!in_sched_functions(ip))
14558 diff -urNp linux-3.0.7/arch/x86/kernel/process.c linux-3.0.7/arch/x86/kernel/process.c
14559 --- linux-3.0.7/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14560 +++ linux-3.0.7/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14561 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14562
14563 void free_thread_info(struct thread_info *ti)
14564 {
14565 - free_thread_xstate(ti->task);
14566 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14567 }
14568
14569 +static struct kmem_cache *task_struct_cachep;
14570 +
14571 void arch_task_cache_init(void)
14572 {
14573 - task_xstate_cachep =
14574 - kmem_cache_create("task_xstate", xstate_size,
14575 + /* create a slab on which task_structs can be allocated */
14576 + task_struct_cachep =
14577 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14578 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14579 +
14580 + task_xstate_cachep =
14581 + kmem_cache_create("task_xstate", xstate_size,
14582 __alignof__(union thread_xstate),
14583 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14584 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14585 +}
14586 +
14587 +struct task_struct *alloc_task_struct_node(int node)
14588 +{
14589 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14590 +}
14591 +
14592 +void free_task_struct(struct task_struct *task)
14593 +{
14594 + free_thread_xstate(task);
14595 + kmem_cache_free(task_struct_cachep, task);
14596 }
14597
14598 /*
14599 @@ -70,7 +87,7 @@ void exit_thread(void)
14600 unsigned long *bp = t->io_bitmap_ptr;
14601
14602 if (bp) {
14603 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14604 + struct tss_struct *tss = init_tss + get_cpu();
14605
14606 t->io_bitmap_ptr = NULL;
14607 clear_thread_flag(TIF_IO_BITMAP);
14608 @@ -106,7 +123,7 @@ void show_regs_common(void)
14609
14610 printk(KERN_CONT "\n");
14611 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14612 - current->pid, current->comm, print_tainted(),
14613 + task_pid_nr(current), current->comm, print_tainted(),
14614 init_utsname()->release,
14615 (int)strcspn(init_utsname()->version, " "),
14616 init_utsname()->version);
14617 @@ -120,6 +137,9 @@ void flush_thread(void)
14618 {
14619 struct task_struct *tsk = current;
14620
14621 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14622 + loadsegment(gs, 0);
14623 +#endif
14624 flush_ptrace_hw_breakpoint(tsk);
14625 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14626 /*
14627 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14628 regs.di = (unsigned long) arg;
14629
14630 #ifdef CONFIG_X86_32
14631 - regs.ds = __USER_DS;
14632 - regs.es = __USER_DS;
14633 + regs.ds = __KERNEL_DS;
14634 + regs.es = __KERNEL_DS;
14635 regs.fs = __KERNEL_PERCPU;
14636 - regs.gs = __KERNEL_STACK_CANARY;
14637 + savesegment(gs, regs.gs);
14638 #else
14639 regs.ss = __KERNEL_DS;
14640 #endif
14641 @@ -403,7 +423,7 @@ void default_idle(void)
14642 EXPORT_SYMBOL(default_idle);
14643 #endif
14644
14645 -void stop_this_cpu(void *dummy)
14646 +__noreturn void stop_this_cpu(void *dummy)
14647 {
14648 local_irq_disable();
14649 /*
14650 @@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14651 }
14652 early_param("idle", idle_setup);
14653
14654 -unsigned long arch_align_stack(unsigned long sp)
14655 +#ifdef CONFIG_PAX_RANDKSTACK
14656 +void pax_randomize_kstack(struct pt_regs *regs)
14657 {
14658 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14659 - sp -= get_random_int() % 8192;
14660 - return sp & ~0xf;
14661 -}
14662 + struct thread_struct *thread = &current->thread;
14663 + unsigned long time;
14664
14665 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14666 -{
14667 - unsigned long range_end = mm->brk + 0x02000000;
14668 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14669 -}
14670 + if (!randomize_va_space)
14671 + return;
14672 +
14673 + if (v8086_mode(regs))
14674 + return;
14675
14676 + rdtscl(time);
14677 +
14678 + /* P4 seems to return a 0 LSB, ignore it */
14679 +#ifdef CONFIG_MPENTIUM4
14680 + time &= 0x3EUL;
14681 + time <<= 2;
14682 +#elif defined(CONFIG_X86_64)
14683 + time &= 0xFUL;
14684 + time <<= 4;
14685 +#else
14686 + time &= 0x1FUL;
14687 + time <<= 3;
14688 +#endif
14689 +
14690 + thread->sp0 ^= time;
14691 + load_sp0(init_tss + smp_processor_id(), thread);
14692 +
14693 +#ifdef CONFIG_X86_64
14694 + percpu_write(kernel_stack, thread->sp0);
14695 +#endif
14696 +}
14697 +#endif
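
The pax_randomize_kstack() hunk above XORs a few low bits of the TSC into thread->sp0, so each return to user space leaves the kernel stack entry point at a slightly different, still 16-byte-aligned offset; the per-config masks only bound that perturbation (4 bits scaled by 16 on the x86_64 branch). The user-space sketch below shows just that mask/shift arithmetic, assuming an x86 toolchain that provides __rdtsc(); it is not kernel code.

/* User-space sketch of the TSC mask/shift arithmetic from the hunk above
 * (x86_64 branch); __rdtsc() from <x86intrin.h> stands in for rdtscl().
 * It only demonstrates the range of offsets produced. */
#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

static uint64_t kstack_perturbation(void)
{
    uint64_t t = __rdtsc();

    t &= 0xFUL;   /* keep 4 low TSC bits: 16 possible values      */
    t <<= 4;      /* scale by 16: offsets 0..240, 16-byte aligned */
    return t;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("sp0 ^= %3llu\n", (unsigned long long)kstack_perturbation());
    return 0;
}
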
14698 diff -urNp linux-3.0.7/arch/x86/kernel/ptrace.c linux-3.0.7/arch/x86/kernel/ptrace.c
14699 --- linux-3.0.7/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14700 +++ linux-3.0.7/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14701 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14702 unsigned long addr, unsigned long data)
14703 {
14704 int ret;
14705 - unsigned long __user *datap = (unsigned long __user *)data;
14706 + unsigned long __user *datap = (__force unsigned long __user *)data;
14707
14708 switch (request) {
14709 /* read the word at location addr in the USER area. */
14710 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14711 if ((int) addr < 0)
14712 return -EIO;
14713 ret = do_get_thread_area(child, addr,
14714 - (struct user_desc __user *)data);
14715 + (__force struct user_desc __user *) data);
14716 break;
14717
14718 case PTRACE_SET_THREAD_AREA:
14719 if ((int) addr < 0)
14720 return -EIO;
14721 ret = do_set_thread_area(child, addr,
14722 - (struct user_desc __user *)data, 0);
14723 + (__force struct user_desc __user *) data, 0);
14724 break;
14725 #endif
14726
14727 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14728 memset(info, 0, sizeof(*info));
14729 info->si_signo = SIGTRAP;
14730 info->si_code = si_code;
14731 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14732 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14733 }
14734
14735 void user_single_step_siginfo(struct task_struct *tsk,
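
The ptrace.c changes above are pure sparse annotations: the integer data argument was already being cast to a __user pointer, and __force tells sparse that crossing address spaces here is intentional. The kernel defines both as checker-only attributes; the stand-alone sketch below mirrors that pattern in spirit (macro bodies simplified, and put_word() is a made-up stand-in for put_user()).

/* Minimal sketch of the __user/__force sparse annotations used above; under
 * a normal compile the macros expand to nothing, under sparse (__CHECKER__)
 * they become address-space attributes. */
#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static int put_word(unsigned long __user *dst, unsigned long val)
{
    /* stand-in for put_user(); just report what would be written */
    printf("would write %lu to user address %p\n", val, (void *)dst);
    return 0;
}

int main(void)
{
    unsigned long data = 0x1000;   /* ptrace passes pointers as integers */

    /* without __force, sparse would warn about the implicit
     * kernel-to-user address-space conversion in this cast */
    unsigned long __user *datap = (__force unsigned long __user *)data;

    return put_word(datap, 42);
}
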
14736 diff -urNp linux-3.0.7/arch/x86/kernel/pvclock.c linux-3.0.7/arch/x86/kernel/pvclock.c
14737 --- linux-3.0.7/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14738 +++ linux-3.0.7/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14739 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14740 return pv_tsc_khz;
14741 }
14742
14743 -static atomic64_t last_value = ATOMIC64_INIT(0);
14744 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14745
14746 void pvclock_resume(void)
14747 {
14748 - atomic64_set(&last_value, 0);
14749 + atomic64_set_unchecked(&last_value, 0);
14750 }
14751
14752 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14753 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14754 * updating at the same time, and one of them could be slightly behind,
14755 * making the assumption that last_value always go forward fail to hold.
14756 */
14757 - last = atomic64_read(&last_value);
14758 + last = atomic64_read_unchecked(&last_value);
14759 do {
14760 if (ret < last)
14761 return last;
14762 - last = atomic64_cmpxchg(&last_value, last, ret);
14763 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14764 } while (unlikely(last != ret));
14765
14766 return ret;
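
Besides switching last_value to the *_unchecked atomic64 variants (which, under PaX's CONFIG_PAX_REFCOUNT, exempts a counter that is not a reference count from overflow instrumentation), the surrounding logic above is the usual monotonic clamp: a new clock reading is published only if nothing newer has already been returned. A stand-alone C11 sketch of that loop, with illustrative names:

/* C11 sketch of the monotonic-clamp loop visible in the hunk above;
 * names are illustrative, not the kernel's. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

static uint64_t monotonic_read(uint64_t raw)
{
    uint64_t last = atomic_load(&last_value);

    for (;;) {
        if (raw < last)         /* another CPU already returned a later time */
            return last;
        /* publish our reading; on failure 'last' is refreshed and we retry */
        if (atomic_compare_exchange_weak(&last_value, &last, raw))
            return raw;
    }
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_read(100));
    printf("%llu\n", (unsigned long long)monotonic_read(90));  /* clamped to 100 */
    return 0;
}
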
14767 diff -urNp linux-3.0.7/arch/x86/kernel/reboot.c linux-3.0.7/arch/x86/kernel/reboot.c
14768 --- linux-3.0.7/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14769 +++ linux-3.0.7/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14770 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14771 EXPORT_SYMBOL(pm_power_off);
14772
14773 static const struct desc_ptr no_idt = {};
14774 -static int reboot_mode;
14775 +static unsigned short reboot_mode;
14776 enum reboot_type reboot_type = BOOT_ACPI;
14777 int reboot_force;
14778
14779 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14780 extern const unsigned char machine_real_restart_asm[];
14781 extern const u64 machine_real_restart_gdt[3];
14782
14783 -void machine_real_restart(unsigned int type)
14784 +__noreturn void machine_real_restart(unsigned int type)
14785 {
14786 void *restart_va;
14787 unsigned long restart_pa;
14788 - void (*restart_lowmem)(unsigned int);
14789 + void (* __noreturn restart_lowmem)(unsigned int);
14790 u64 *lowmem_gdt;
14791
14792 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14793 + struct desc_struct *gdt;
14794 +#endif
14795 +
14796 local_irq_disable();
14797
14798 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14799 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14800 boot)". This seems like a fairly standard thing that gets set by
14801 REBOOT.COM programs, and the previous reset routine did this
14802 too. */
14803 - *((unsigned short *)0x472) = reboot_mode;
14804 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14805
14806 /* Patch the GDT in the low memory trampoline */
14807 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14808
14809 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14810 restart_pa = virt_to_phys(restart_va);
14811 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14812 + restart_lowmem = (void *)restart_pa;
14813
14814 /* GDT[0]: GDT self-pointer */
14815 lowmem_gdt[0] =
14816 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14817 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14818
14819 /* Jump to the identity-mapped low memory code */
14820 +
14821 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14822 + gdt = get_cpu_gdt_table(smp_processor_id());
14823 + pax_open_kernel();
14824 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14825 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14826 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14827 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14828 +#endif
14829 +#ifdef CONFIG_PAX_KERNEXEC
14830 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14831 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14832 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14833 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14834 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14835 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14836 +#endif
14837 + pax_close_kernel();
14838 +#endif
14839 +
14840 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14841 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14842 + unreachable();
14843 +#else
14844 restart_lowmem(type);
14845 +#endif
14846 +
14847 }
14848 #ifdef CONFIG_APM_MODULE
14849 EXPORT_SYMBOL(machine_real_restart);
14850 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14851 * try to force a triple fault and then cycle between hitting the keyboard
14852 * controller and doing that
14853 */
14854 -static void native_machine_emergency_restart(void)
14855 +__noreturn static void native_machine_emergency_restart(void)
14856 {
14857 int i;
14858 int attempt = 0;
14859 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14860 #endif
14861 }
14862
14863 -static void __machine_emergency_restart(int emergency)
14864 +static __noreturn void __machine_emergency_restart(int emergency)
14865 {
14866 reboot_emergency = emergency;
14867 machine_ops.emergency_restart();
14868 }
14869
14870 -static void native_machine_restart(char *__unused)
14871 +static __noreturn void native_machine_restart(char *__unused)
14872 {
14873 printk("machine restart\n");
14874
14875 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14876 __machine_emergency_restart(0);
14877 }
14878
14879 -static void native_machine_halt(void)
14880 +static __noreturn void native_machine_halt(void)
14881 {
14882 /* stop other cpus and apics */
14883 machine_shutdown();
14884 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14885 stop_this_cpu(NULL);
14886 }
14887
14888 -static void native_machine_power_off(void)
14889 +__noreturn static void native_machine_power_off(void)
14890 {
14891 if (pm_power_off) {
14892 if (!reboot_force)
14893 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14894 }
14895 /* a fallback in case there is no PM info available */
14896 tboot_shutdown(TB_SHUTDOWN_HALT);
14897 + unreachable();
14898 }
14899
14900 struct machine_ops machine_ops = {
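
Most of the reboot.c hunks above mark the restart/halt paths, including the restart_lowmem function pointer, as __noreturn and add unreachable() after tboot_shutdown(). The attribute tells the compiler that control never comes back, so code after such calls is known to be dead and spurious "missing return" diagnostics go away. A small sketch using the underlying GCC/Clang attribute (the kernel spells it through its own __noreturn macro):

/* Sketch of the __noreturn annotation added throughout reboot.c above;
 * plain attribute syntax, names illustrative. */
#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn)) static void halt_path(const char *why)
{
    fprintf(stderr, "halting: %s\n", why);
    abort();                         /* abort() is itself noreturn */
}

static int do_request(int fatal)
{
    if (fatal)
        halt_path("fatal request");  /* compiler knows this never falls through */
    return 0;                        /* so no "missing return" on this path */
}

int main(void)
{
    return do_request(0);
}
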
14901 diff -urNp linux-3.0.7/arch/x86/kernel/setup.c linux-3.0.7/arch/x86/kernel/setup.c
14902 --- linux-3.0.7/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14903 +++ linux-3.0.7/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
14904 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void
14905
14906 switch (data->type) {
14907 case SETUP_E820_EXT:
14908 - parse_e820_ext(data);
14909 + parse_e820_ext((struct setup_data __force_kernel *)data);
14910 break;
14911 case SETUP_DTB:
14912 add_dtb(pa_data);
14913 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14914 * area (640->1Mb) as ram even though it is not.
14915 * take them out.
14916 */
14917 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14918 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14919 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14920 }
14921
14922 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14923
14924 if (!boot_params.hdr.root_flags)
14925 root_mountflags &= ~MS_RDONLY;
14926 - init_mm.start_code = (unsigned long) _text;
14927 - init_mm.end_code = (unsigned long) _etext;
14928 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14929 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14930 init_mm.end_data = (unsigned long) _edata;
14931 init_mm.brk = _brk_end;
14932
14933 - code_resource.start = virt_to_phys(_text);
14934 - code_resource.end = virt_to_phys(_etext)-1;
14935 - data_resource.start = virt_to_phys(_etext);
14936 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14937 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14938 + data_resource.start = virt_to_phys(_sdata);
14939 data_resource.end = virt_to_phys(_edata)-1;
14940 bss_resource.start = virt_to_phys(&__bss_start);
14941 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14942 diff -urNp linux-3.0.7/arch/x86/kernel/setup_percpu.c linux-3.0.7/arch/x86/kernel/setup_percpu.c
14943 --- linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14944 +++ linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14945 @@ -21,19 +21,17 @@
14946 #include <asm/cpu.h>
14947 #include <asm/stackprotector.h>
14948
14949 -DEFINE_PER_CPU(int, cpu_number);
14950 +#ifdef CONFIG_SMP
14951 +DEFINE_PER_CPU(unsigned int, cpu_number);
14952 EXPORT_PER_CPU_SYMBOL(cpu_number);
14953 +#endif
14954
14955 -#ifdef CONFIG_X86_64
14956 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14957 -#else
14958 -#define BOOT_PERCPU_OFFSET 0
14959 -#endif
14960
14961 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14962 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14963
14964 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14965 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14966 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14967 };
14968 EXPORT_SYMBOL(__per_cpu_offset);
14969 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14970 {
14971 #ifdef CONFIG_X86_32
14972 struct desc_struct gdt;
14973 + unsigned long base = per_cpu_offset(cpu);
14974
14975 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14976 - 0x2 | DESCTYPE_S, 0x8);
14977 - gdt.s = 1;
14978 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14979 + 0x83 | DESCTYPE_S, 0xC);
14980 write_gdt_entry(get_cpu_gdt_table(cpu),
14981 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14982 #endif
14983 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14984 /* alrighty, percpu areas up and running */
14985 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14986 for_each_possible_cpu(cpu) {
14987 +#ifdef CONFIG_CC_STACKPROTECTOR
14988 +#ifdef CONFIG_X86_32
14989 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14990 +#endif
14991 +#endif
14992 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14993 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14994 per_cpu(cpu_number, cpu) = cpu;
14995 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14996 */
14997 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14998 #endif
14999 +#ifdef CONFIG_CC_STACKPROTECTOR
15000 +#ifdef CONFIG_X86_32
15001 + if (!cpu)
15002 + per_cpu(stack_canary.canary, cpu) = canary;
15003 +#endif
15004 +#endif
15005 /*
15006 * Up to this point, the boot CPU has been using .init.data
15007 * area. Reload any changed state for the boot CPU.
15008 diff -urNp linux-3.0.7/arch/x86/kernel/signal.c linux-3.0.7/arch/x86/kernel/signal.c
15009 --- linux-3.0.7/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
15010 +++ linux-3.0.7/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
15011 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15012 * Align the stack pointer according to the i386 ABI,
15013 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15014 */
15015 - sp = ((sp + 4) & -16ul) - 4;
15016 + sp = ((sp - 12) & -16ul) - 4;
15017 #else /* !CONFIG_X86_32 */
15018 sp = round_down(sp, 16) - 8;
15019 #endif
15020 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15021 * Return an always-bogus address instead so we will die with SIGSEGV.
15022 */
15023 if (onsigstack && !likely(on_sig_stack(sp)))
15024 - return (void __user *)-1L;
15025 + return (__force void __user *)-1L;
15026
15027 /* save i387 state */
15028 if (used_math() && save_i387_xstate(*fpstate) < 0)
15029 - return (void __user *)-1L;
15030 + return (__force void __user *)-1L;
15031
15032 return (void __user *)sp;
15033 }
15034 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15035 }
15036
15037 if (current->mm->context.vdso)
15038 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15039 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15040 else
15041 - restorer = &frame->retcode;
15042 + restorer = (void __user *)&frame->retcode;
15043 if (ka->sa.sa_flags & SA_RESTORER)
15044 restorer = ka->sa.sa_restorer;
15045
15046 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15047 * reasons and because gdb uses it as a signature to notice
15048 * signal handler stack frames.
15049 */
15050 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15051 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15052
15053 if (err)
15054 return -EFAULT;
15055 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15056 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15057
15058 /* Set up to return from userspace. */
15059 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15060 + if (current->mm->context.vdso)
15061 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15062 + else
15063 + restorer = (void __user *)&frame->retcode;
15064 if (ka->sa.sa_flags & SA_RESTORER)
15065 restorer = ka->sa.sa_restorer;
15066 put_user_ex(restorer, &frame->pretcode);
15067 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15068 * reasons and because gdb uses it as a signature to notice
15069 * signal handler stack frames.
15070 */
15071 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15072 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15073 } put_user_catch(err);
15074
15075 if (err)
15076 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15077 int signr;
15078 sigset_t *oldset;
15079
15080 + pax_track_stack();
15081 +
15082 /*
15083 * We want the common case to go fast, which is why we may in certain
15084 * cases get here from kernel mode. Just return without doing anything
15085 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15086 * X86_32: vm86 regs switched out by assembly code before reaching
15087 * here, so testing against kernel CS suffices.
15088 */
15089 - if (!user_mode(regs))
15090 + if (!user_mode_novm(regs))
15091 return;
15092
15093 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
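
The align_sigframe() change above replaces ((sp + 4) & -16ul) - 4 with ((sp - 12) & -16ul) - 4. Both keep the i386 ABI rule that (esp + 4) is 16-byte aligned at handler entry; because subtracting 16 commutes with the rounding, the new form always lands exactly 16 bytes lower, i.e. strictly below the original stack pointer. A quick self-check of that arithmetic:

/* Verifies the two alignment formulas from the hunk above over a full
 * window of residues mod 16. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 0x1000; sp < 0x1020; sp++) {
        unsigned long old = ((sp + 4) & -16ul) - 4;
        unsigned long new = ((sp - 12) & -16ul) - 4;

        assert((old + 4) % 16 == 0);   /* i386 ABI entry alignment */
        assert((new + 4) % 16 == 0);
        assert(new == old - 16);       /* new form is exactly 16 bytes lower */
        assert(new < sp);              /* and strictly below the original sp */
    }
    printf("both forms keep (sp' + 4) %% 16 == 0; new form is 16 bytes lower\n");
    return 0;
}
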
15094 diff -urNp linux-3.0.7/arch/x86/kernel/smpboot.c linux-3.0.7/arch/x86/kernel/smpboot.c
15095 --- linux-3.0.7/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15096 +++ linux-3.0.7/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15097 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15098 set_idle_for_cpu(cpu, c_idle.idle);
15099 do_rest:
15100 per_cpu(current_task, cpu) = c_idle.idle;
15101 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15102 #ifdef CONFIG_X86_32
15103 /* Stack for startup_32 can be just as for start_secondary onwards */
15104 irq_ctx_init(cpu);
15105 #else
15106 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15107 initial_gs = per_cpu_offset(cpu);
15108 - per_cpu(kernel_stack, cpu) =
15109 - (unsigned long)task_stack_page(c_idle.idle) -
15110 - KERNEL_STACK_OFFSET + THREAD_SIZE;
15111 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15112 #endif
15113 +
15114 + pax_open_kernel();
15115 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15116 + pax_close_kernel();
15117 +
15118 initial_code = (unsigned long)start_secondary;
15119 stack_start = c_idle.idle->thread.sp;
15120
15121 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15122
15123 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15124
15125 +#ifdef CONFIG_PAX_PER_CPU_PGD
15126 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15127 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15128 + KERNEL_PGD_PTRS);
15129 +#endif
15130 +
15131 err = do_boot_cpu(apicid, cpu);
15132 if (err) {
15133 pr_debug("do_boot_cpu failed %d\n", err);
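
The CONFIG_PAX_PER_CPU_PGD hunk above copies the kernel-space entries of swapper_pg_dir into the page directory of the CPU being brought up, so every per-CPU PGD shares the same kernel mappings while keeping its own user half. Conceptually this is a bounded copy of the upper part of an array of entries; the sketch below uses illustrative sizes and a fake split point, not the real architectural constants.

/* Conceptual sketch of cloning the kernel half of a page directory into a
 * per-CPU copy; sizes, split point and the fake entry are illustrative. */
#include <string.h>
#include <stdio.h>

#define PTRS_PER_PGD        512
#define KERNEL_PGD_BOUNDARY 256                      /* illustrative split */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_t;

static pgd_t swapper_pg_dir[PTRS_PER_PGD];
static pgd_t cpu_pgd[PTRS_PER_PGD];

int main(void)
{
    swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0x1000UL | 0x63;  /* fake kernel entry */

    memcpy(cpu_pgd + KERNEL_PGD_BOUNDARY,
           swapper_pg_dir + KERNEL_PGD_BOUNDARY,
           KERNEL_PGD_PTRS * sizeof(pgd_t));

    printf("copied %d kernel pgd entries\n", KERNEL_PGD_PTRS);
    return 0;
}
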
15134 diff -urNp linux-3.0.7/arch/x86/kernel/step.c linux-3.0.7/arch/x86/kernel/step.c
15135 --- linux-3.0.7/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15136 +++ linux-3.0.7/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15137 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15138 struct desc_struct *desc;
15139 unsigned long base;
15140
15141 - seg &= ~7UL;
15142 + seg >>= 3;
15143
15144 mutex_lock(&child->mm->context.lock);
15145 - if (unlikely((seg >> 3) >= child->mm->context.size))
15146 + if (unlikely(seg >= child->mm->context.size))
15147 addr = -1L; /* bogus selector, access would fault */
15148 else {
15149 desc = child->mm->context.ldt + seg;
15150 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15151 addr += base;
15152 }
15153 mutex_unlock(&child->mm->context.lock);
15154 - }
15155 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15156 + addr = ktla_ktva(addr);
15157
15158 return addr;
15159 }
15160 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15161 unsigned char opcode[15];
15162 unsigned long addr = convert_ip_to_linear(child, regs);
15163
15164 + if (addr == -EINVAL)
15165 + return 0;
15166 +
15167 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15168 for (i = 0; i < copied; i++) {
15169 switch (opcode[i]) {
15170 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15171
15172 #ifdef CONFIG_X86_64
15173 case 0x40 ... 0x4f:
15174 - if (regs->cs != __USER_CS)
15175 + if ((regs->cs & 0xffff) != __USER_CS)
15176 /* 32-bit mode: register increment */
15177 return 0;
15178 /* 64-bit mode: REX prefix */
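
In convert_ip_to_linear() above, the selector handling switches from masking off the low three bits (seg &= ~7, the byte offset into the descriptor table) to shifting them out (seg >>= 3, the descriptor index). The selector layout itself is architectural: bits 1:0 hold the RPL, bit 2 the table indicator, bits 15:3 the index. Helper names in the sketch are illustrative only.

/* x86 segment selector fields (architectural); helpers are illustrative. */
#include <stdio.h>
#include <stdint.h>

static unsigned sel_rpl(uint16_t sel)   { return sel & 3; }
static unsigned sel_ti(uint16_t sel)    { return (sel >> 2) & 1; }
static unsigned sel_index(uint16_t sel) { return sel >> 3; }

int main(void)
{
    uint16_t sel = 0x007b;  /* the i386 __USER_DS selector: GDT entry 15, RPL 3 */

    /* 'sel & ~7' keeps the byte offset into the table (index * 8);
     * 'sel >> 3' yields the index itself, which the hunk switches to. */
    printf("index=%u ti=%u rpl=%u byte_offset=%u\n",
           sel_index(sel), sel_ti(sel), sel_rpl(sel), sel & ~7u);
    return 0;
}
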
15179 diff -urNp linux-3.0.7/arch/x86/kernel/syscall_table_32.S linux-3.0.7/arch/x86/kernel/syscall_table_32.S
15180 --- linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15181 +++ linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15182 @@ -1,3 +1,4 @@
15183 +.section .rodata,"a",@progbits
15184 ENTRY(sys_call_table)
15185 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15186 .long sys_exit
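
The one-line hunk above places the 32-bit sys_call_table in .rodata so the dispatch table cannot be silently rewritten at run time. The closest C analogue is declaring a table of function pointers const, which the toolchain then puts in a read-only (or RELRO) section; the names below are illustrative.

/* C analogue of a read-only dispatch table, as the hunk above does for the
 * 32-bit syscall table; ops and names are made up. */
#include <stdio.h>

static long op_restart(long a) { return -a; }
static long op_exit(long a)    { return a; }

static long (* const dispatch[])(long) = { op_restart, op_exit };

int main(void)
{
    printf("%ld %ld\n", dispatch[0](5), dispatch[1](5));
    return 0;
}
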
15187 diff -urNp linux-3.0.7/arch/x86/kernel/sys_i386_32.c linux-3.0.7/arch/x86/kernel/sys_i386_32.c
15188 --- linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15189 +++ linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15190 @@ -24,17 +24,224 @@
15191
15192 #include <asm/syscalls.h>
15193
15194 -/*
15195 - * Do a system call from kernel instead of calling sys_execve so we
15196 - * end up with proper pt_regs.
15197 - */
15198 -int kernel_execve(const char *filename,
15199 - const char *const argv[],
15200 - const char *const envp[])
15201 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15202 {
15203 - long __res;
15204 - asm volatile ("int $0x80"
15205 - : "=a" (__res)
15206 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15207 - return __res;
15208 + unsigned long pax_task_size = TASK_SIZE;
15209 +
15210 +#ifdef CONFIG_PAX_SEGMEXEC
15211 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15212 + pax_task_size = SEGMEXEC_TASK_SIZE;
15213 +#endif
15214 +
15215 + if (len > pax_task_size || addr > pax_task_size - len)
15216 + return -EINVAL;
15217 +
15218 + return 0;
15219 +}
15220 +
15221 +unsigned long
15222 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
15223 + unsigned long len, unsigned long pgoff, unsigned long flags)
15224 +{
15225 + struct mm_struct *mm = current->mm;
15226 + struct vm_area_struct *vma;
15227 + unsigned long start_addr, pax_task_size = TASK_SIZE;
15228 +
15229 +#ifdef CONFIG_PAX_SEGMEXEC
15230 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15231 + pax_task_size = SEGMEXEC_TASK_SIZE;
15232 +#endif
15233 +
15234 + pax_task_size -= PAGE_SIZE;
15235 +
15236 + if (len > pax_task_size)
15237 + return -ENOMEM;
15238 +
15239 + if (flags & MAP_FIXED)
15240 + return addr;
15241 +
15242 +#ifdef CONFIG_PAX_RANDMMAP
15243 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15244 +#endif
15245 +
15246 + if (addr) {
15247 + addr = PAGE_ALIGN(addr);
15248 + if (pax_task_size - len >= addr) {
15249 + vma = find_vma(mm, addr);
15250 + if (check_heap_stack_gap(vma, addr, len))
15251 + return addr;
15252 + }
15253 + }
15254 + if (len > mm->cached_hole_size) {
15255 + start_addr = addr = mm->free_area_cache;
15256 + } else {
15257 + start_addr = addr = mm->mmap_base;
15258 + mm->cached_hole_size = 0;
15259 + }
15260 +
15261 +#ifdef CONFIG_PAX_PAGEEXEC
15262 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15263 + start_addr = 0x00110000UL;
15264 +
15265 +#ifdef CONFIG_PAX_RANDMMAP
15266 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15267 + start_addr += mm->delta_mmap & 0x03FFF000UL;
15268 +#endif
15269 +
15270 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15271 + start_addr = addr = mm->mmap_base;
15272 + else
15273 + addr = start_addr;
15274 + }
15275 +#endif
15276 +
15277 +full_search:
15278 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15279 + /* At this point: (!vma || addr < vma->vm_end). */
15280 + if (pax_task_size - len < addr) {
15281 + /*
15282 + * Start a new search - just in case we missed
15283 + * some holes.
15284 + */
15285 + if (start_addr != mm->mmap_base) {
15286 + start_addr = addr = mm->mmap_base;
15287 + mm->cached_hole_size = 0;
15288 + goto full_search;
15289 + }
15290 + return -ENOMEM;
15291 + }
15292 + if (check_heap_stack_gap(vma, addr, len))
15293 + break;
15294 + if (addr + mm->cached_hole_size < vma->vm_start)
15295 + mm->cached_hole_size = vma->vm_start - addr;
15296 + addr = vma->vm_end;
15297 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
15298 + start_addr = addr = mm->mmap_base;
15299 + mm->cached_hole_size = 0;
15300 + goto full_search;
15301 + }
15302 + }
15303 +
15304 + /*
15305 + * Remember the place where we stopped the search:
15306 + */
15307 + mm->free_area_cache = addr + len;
15308 + return addr;
15309 +}
15310 +
15311 +unsigned long
15312 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15313 + const unsigned long len, const unsigned long pgoff,
15314 + const unsigned long flags)
15315 +{
15316 + struct vm_area_struct *vma;
15317 + struct mm_struct *mm = current->mm;
15318 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15319 +
15320 +#ifdef CONFIG_PAX_SEGMEXEC
15321 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15322 + pax_task_size = SEGMEXEC_TASK_SIZE;
15323 +#endif
15324 +
15325 + pax_task_size -= PAGE_SIZE;
15326 +
15327 + /* requested length too big for entire address space */
15328 + if (len > pax_task_size)
15329 + return -ENOMEM;
15330 +
15331 + if (flags & MAP_FIXED)
15332 + return addr;
15333 +
15334 +#ifdef CONFIG_PAX_PAGEEXEC
15335 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15336 + goto bottomup;
15337 +#endif
15338 +
15339 +#ifdef CONFIG_PAX_RANDMMAP
15340 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15341 +#endif
15342 +
15343 + /* requesting a specific address */
15344 + if (addr) {
15345 + addr = PAGE_ALIGN(addr);
15346 + if (pax_task_size - len >= addr) {
15347 + vma = find_vma(mm, addr);
15348 + if (check_heap_stack_gap(vma, addr, len))
15349 + return addr;
15350 + }
15351 + }
15352 +
15353 + /* check if free_area_cache is useful for us */
15354 + if (len <= mm->cached_hole_size) {
15355 + mm->cached_hole_size = 0;
15356 + mm->free_area_cache = mm->mmap_base;
15357 + }
15358 +
15359 + /* either no address requested or can't fit in requested address hole */
15360 + addr = mm->free_area_cache;
15361 +
15362 + /* make sure it can fit in the remaining address space */
15363 + if (addr > len) {
15364 + vma = find_vma(mm, addr-len);
15365 + if (check_heap_stack_gap(vma, addr - len, len))
15366 + /* remember the address as a hint for next time */
15367 + return (mm->free_area_cache = addr-len);
15368 + }
15369 +
15370 + if (mm->mmap_base < len)
15371 + goto bottomup;
15372 +
15373 + addr = mm->mmap_base-len;
15374 +
15375 + do {
15376 + /*
15377 + * Lookup failure means no vma is above this address,
15378 + * else if new region fits below vma->vm_start,
15379 + * return with success:
15380 + */
15381 + vma = find_vma(mm, addr);
15382 + if (check_heap_stack_gap(vma, addr, len))
15383 + /* remember the address as a hint for next time */
15384 + return (mm->free_area_cache = addr);
15385 +
15386 + /* remember the largest hole we saw so far */
15387 + if (addr + mm->cached_hole_size < vma->vm_start)
15388 + mm->cached_hole_size = vma->vm_start - addr;
15389 +
15390 + /* try just below the current vma->vm_start */
15391 + addr = skip_heap_stack_gap(vma, len);
15392 + } while (!IS_ERR_VALUE(addr));
15393 +
15394 +bottomup:
15395 + /*
15396 + * A failed mmap() very likely causes application failure,
15397 + * so fall back to the bottom-up function here. This scenario
15398 + * can happen with large stack limits and large mmap()
15399 + * allocations.
15400 + */
15401 +
15402 +#ifdef CONFIG_PAX_SEGMEXEC
15403 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15404 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15405 + else
15406 +#endif
15407 +
15408 + mm->mmap_base = TASK_UNMAPPED_BASE;
15409 +
15410 +#ifdef CONFIG_PAX_RANDMMAP
15411 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15412 + mm->mmap_base += mm->delta_mmap;
15413 +#endif
15414 +
15415 + mm->free_area_cache = mm->mmap_base;
15416 + mm->cached_hole_size = ~0UL;
15417 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15418 + /*
15419 + * Restore the topdown base:
15420 + */
15421 + mm->mmap_base = base;
15422 + mm->free_area_cache = base;
15423 + mm->cached_hole_size = ~0UL;
15424 +
15425 + return addr;
15426 }
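
Both the i386 allocators added above and the x86_64 ones in the next file replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). The helper itself is defined elsewhere in this patch; the idea is to refuse candidate ranges that would butt up against a downward-growing stack VMA without a guard gap. The sketch below is a hedged illustration of that idea, with a made-up gap size and simplified types.

/* Hedged sketch of the idea behind check_heap_stack_gap(); the real helper
 * differs in detail and lives elsewhere in this patch. */
#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP    (64UL * 1024)   /* illustrative, not the kernel's value */
#define VM_GROWSDOWN 0x0100UL

struct vma {
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_flags;
};

static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
    if (!vma)                           /* no mapping above: the hole is free */
        return true;
    if (addr + len > vma->vm_start)     /* would overlap the next mapping     */
        return false;
    if (vma->vm_flags & VM_GROWSDOWN)   /* next mapping is a stack: keep room */
        return addr + len + GUARD_GAP <= vma->vm_start;
    return true;
}

int main(void)
{
    struct vma stack = { 0xbf800000UL, 0xbfa00000UL, VM_GROWSDOWN };

    printf("%d\n", gap_ok(&stack, 0xbf700000UL, 0x10000));  /* 1: gap kept  */
    printf("%d\n", gap_ok(&stack, 0xbf7f8000UL, 0x08000));  /* 0: too close */
    return 0;
}
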
15427 diff -urNp linux-3.0.7/arch/x86/kernel/sys_x86_64.c linux-3.0.7/arch/x86/kernel/sys_x86_64.c
15428 --- linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15429 +++ linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15430 @@ -32,8 +32,8 @@ out:
15431 return error;
15432 }
15433
15434 -static void find_start_end(unsigned long flags, unsigned long *begin,
15435 - unsigned long *end)
15436 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15437 + unsigned long *begin, unsigned long *end)
15438 {
15439 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15440 unsigned long new_begin;
15441 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15442 *begin = new_begin;
15443 }
15444 } else {
15445 - *begin = TASK_UNMAPPED_BASE;
15446 + *begin = mm->mmap_base;
15447 *end = TASK_SIZE;
15448 }
15449 }
15450 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15451 if (flags & MAP_FIXED)
15452 return addr;
15453
15454 - find_start_end(flags, &begin, &end);
15455 + find_start_end(mm, flags, &begin, &end);
15456
15457 if (len > end)
15458 return -ENOMEM;
15459
15460 +#ifdef CONFIG_PAX_RANDMMAP
15461 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15462 +#endif
15463 +
15464 if (addr) {
15465 addr = PAGE_ALIGN(addr);
15466 vma = find_vma(mm, addr);
15467 - if (end - len >= addr &&
15468 - (!vma || addr + len <= vma->vm_start))
15469 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15470 return addr;
15471 }
15472 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15473 @@ -106,7 +109,7 @@ full_search:
15474 }
15475 return -ENOMEM;
15476 }
15477 - if (!vma || addr + len <= vma->vm_start) {
15478 + if (check_heap_stack_gap(vma, addr, len)) {
15479 /*
15480 * Remember the place where we stopped the search:
15481 */
15482 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15483 {
15484 struct vm_area_struct *vma;
15485 struct mm_struct *mm = current->mm;
15486 - unsigned long addr = addr0;
15487 + unsigned long base = mm->mmap_base, addr = addr0;
15488
15489 /* requested length too big for entire address space */
15490 if (len > TASK_SIZE)
15491 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15492 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15493 goto bottomup;
15494
15495 +#ifdef CONFIG_PAX_RANDMMAP
15496 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15497 +#endif
15498 +
15499 /* requesting a specific address */
15500 if (addr) {
15501 addr = PAGE_ALIGN(addr);
15502 - vma = find_vma(mm, addr);
15503 - if (TASK_SIZE - len >= addr &&
15504 - (!vma || addr + len <= vma->vm_start))
15505 - return addr;
15506 + if (TASK_SIZE - len >= addr) {
15507 + vma = find_vma(mm, addr);
15508 + if (check_heap_stack_gap(vma, addr, len))
15509 + return addr;
15510 + }
15511 }
15512
15513 /* check if free_area_cache is useful for us */
15514 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15515 /* make sure it can fit in the remaining address space */
15516 if (addr > len) {
15517 vma = find_vma(mm, addr-len);
15518 - if (!vma || addr <= vma->vm_start)
15519 + if (check_heap_stack_gap(vma, addr - len, len))
15520 /* remember the address as a hint for next time */
15521 return mm->free_area_cache = addr-len;
15522 }
15523 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15524 * return with success:
15525 */
15526 vma = find_vma(mm, addr);
15527 - if (!vma || addr+len <= vma->vm_start)
15528 + if (check_heap_stack_gap(vma, addr, len))
15529 /* remember the address as a hint for next time */
15530 return mm->free_area_cache = addr;
15531
15532 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15533 mm->cached_hole_size = vma->vm_start - addr;
15534
15535 /* try just below the current vma->vm_start */
15536 - addr = vma->vm_start-len;
15537 - } while (len < vma->vm_start);
15538 + addr = skip_heap_stack_gap(vma, len);
15539 + } while (!IS_ERR_VALUE(addr));
15540
15541 bottomup:
15542 /*
15543 @@ -198,13 +206,21 @@ bottomup:
15544 * can happen with large stack limits and large mmap()
15545 * allocations.
15546 */
15547 + mm->mmap_base = TASK_UNMAPPED_BASE;
15548 +
15549 +#ifdef CONFIG_PAX_RANDMMAP
15550 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15551 + mm->mmap_base += mm->delta_mmap;
15552 +#endif
15553 +
15554 + mm->free_area_cache = mm->mmap_base;
15555 mm->cached_hole_size = ~0UL;
15556 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15557 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15558 /*
15559 * Restore the topdown base:
15560 */
15561 - mm->free_area_cache = mm->mmap_base;
15562 + mm->mmap_base = base;
15563 + mm->free_area_cache = base;
15564 mm->cached_hole_size = ~0UL;
15565
15566 return addr;
15567 diff -urNp linux-3.0.7/arch/x86/kernel/tboot.c linux-3.0.7/arch/x86/kernel/tboot.c
15568 --- linux-3.0.7/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15569 +++ linux-3.0.7/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15570 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15571
15572 void tboot_shutdown(u32 shutdown_type)
15573 {
15574 - void (*shutdown)(void);
15575 + void (* __noreturn shutdown)(void);
15576
15577 if (!tboot_enabled())
15578 return;
15579 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15580
15581 switch_to_tboot_pt();
15582
15583 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15584 + shutdown = (void *)tboot->shutdown_entry;
15585 shutdown();
15586
15587 /* should not reach here */
15588 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15589 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15590 }
15591
15592 -static atomic_t ap_wfs_count;
15593 +static atomic_unchecked_t ap_wfs_count;
15594
15595 static int tboot_wait_for_aps(int num_aps)
15596 {
15597 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15598 {
15599 switch (action) {
15600 case CPU_DYING:
15601 - atomic_inc(&ap_wfs_count);
15602 + atomic_inc_unchecked(&ap_wfs_count);
15603 if (num_online_cpus() == 1)
15604 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15605 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15606 return NOTIFY_BAD;
15607 break;
15608 }
15609 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15610
15611 tboot_create_trampoline();
15612
15613 - atomic_set(&ap_wfs_count, 0);
15614 + atomic_set_unchecked(&ap_wfs_count, 0);
15615 register_hotcpu_notifier(&tboot_cpu_notifier);
15616 return 0;
15617 }
15618 diff -urNp linux-3.0.7/arch/x86/kernel/time.c linux-3.0.7/arch/x86/kernel/time.c
15619 --- linux-3.0.7/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15620 +++ linux-3.0.7/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15621 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15622 {
15623 unsigned long pc = instruction_pointer(regs);
15624
15625 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15626 + if (!user_mode(regs) && in_lock_functions(pc)) {
15627 #ifdef CONFIG_FRAME_POINTER
15628 - return *(unsigned long *)(regs->bp + sizeof(long));
15629 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15630 #else
15631 unsigned long *sp =
15632 (unsigned long *)kernel_stack_pointer(regs);
15633 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15634 * or above a saved flags. Eflags has bits 22-31 zero,
15635 * kernel addresses don't.
15636 */
15637 +
15638 +#ifdef CONFIG_PAX_KERNEXEC
15639 + return ktla_ktva(sp[0]);
15640 +#else
15641 if (sp[0] >> 22)
15642 return sp[0];
15643 if (sp[1] >> 22)
15644 return sp[1];
15645 #endif
15646 +
15647 +#endif
15648 }
15649 return pc;
15650 }
15651 diff -urNp linux-3.0.7/arch/x86/kernel/tls.c linux-3.0.7/arch/x86/kernel/tls.c
15652 --- linux-3.0.7/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15653 +++ linux-3.0.7/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15654 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15655 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15656 return -EINVAL;
15657
15658 +#ifdef CONFIG_PAX_SEGMEXEC
15659 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15660 + return -EINVAL;
15661 +#endif
15662 +
15663 set_tls_desc(p, idx, &info, 1);
15664
15665 return 0;
15666 diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_32.S linux-3.0.7/arch/x86/kernel/trampoline_32.S
15667 --- linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15668 +++ linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15669 @@ -32,6 +32,12 @@
15670 #include <asm/segment.h>
15671 #include <asm/page_types.h>
15672
15673 +#ifdef CONFIG_PAX_KERNEXEC
15674 +#define ta(X) (X)
15675 +#else
15676 +#define ta(X) ((X) - __PAGE_OFFSET)
15677 +#endif
15678 +
15679 #ifdef CONFIG_SMP
15680
15681 .section ".x86_trampoline","a"
15682 @@ -62,7 +68,7 @@ r_base = .
15683 inc %ax # protected mode (PE) bit
15684 lmsw %ax # into protected mode
15685 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15686 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15687 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15688
15689 # These need to be in the same 64K segment as the above;
15690 # hence we don't use the boot_gdt_descr defined in head.S
15691 diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_64.S linux-3.0.7/arch/x86/kernel/trampoline_64.S
15692 --- linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15693 +++ linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15694 @@ -90,7 +90,7 @@ startup_32:
15695 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15696 movl %eax, %ds
15697
15698 - movl $X86_CR4_PAE, %eax
15699 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15700 movl %eax, %cr4 # Enable PAE mode
15701
15702 # Setup trampoline 4 level pagetables
15703 @@ -138,7 +138,7 @@ tidt:
15704 # so the kernel can live anywhere
15705 .balign 4
15706 tgdt:
15707 - .short tgdt_end - tgdt # gdt limit
15708 + .short tgdt_end - tgdt - 1 # gdt limit
15709 .long tgdt - r_base
15710 .short 0
15711 .quad 0x00cf9b000000ffff # __KERNEL32_CS
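
The tgdt fix above changes the pseudo-descriptor's limit from "tgdt_end - tgdt" to "tgdt_end - tgdt - 1": architecturally the GDTR limit holds the offset of the last valid byte, i.e. table size minus one. A minimal C sketch of filling such a pseudo-descriptor image (packed layout assumed, values illustrative):

/* The GDTR limit field holds "size - 1", which is what the tgdt hunk above
 * corrects; this just builds the in-memory image. */
#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) gdt_ptr {
    uint16_t limit;     /* sizeof(gdt) - 1, NOT sizeof(gdt) */
    uint64_t base;
};

int main(void)
{
    uint64_t gdt[3] = { 0, 0x00cf9b000000ffffULL, 0x00cf93000000ffffULL };
    struct gdt_ptr gdtr = {
        .limit = sizeof(gdt) - 1,   /* 23 for three 8-byte entries */
        .base  = (uint64_t)(uintptr_t)gdt,
    };

    printf("limit=%u base=%#llx\n", gdtr.limit, (unsigned long long)gdtr.base);
    return 0;
}
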
15712 diff -urNp linux-3.0.7/arch/x86/kernel/traps.c linux-3.0.7/arch/x86/kernel/traps.c
15713 --- linux-3.0.7/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15714 +++ linux-3.0.7/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15715 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15716
15717 /* Do we ignore FPU interrupts ? */
15718 char ignore_fpu_irq;
15719 -
15720 -/*
15721 - * The IDT has to be page-aligned to simplify the Pentium
15722 - * F0 0F bug workaround.
15723 - */
15724 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15725 #endif
15726
15727 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15728 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15729 }
15730
15731 static void __kprobes
15732 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15733 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15734 long error_code, siginfo_t *info)
15735 {
15736 struct task_struct *tsk = current;
15737
15738 #ifdef CONFIG_X86_32
15739 - if (regs->flags & X86_VM_MASK) {
15740 + if (v8086_mode(regs)) {
15741 /*
15742 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15743 * On nmi (interrupt 2), do_trap should not be called.
15744 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15745 }
15746 #endif
15747
15748 - if (!user_mode(regs))
15749 + if (!user_mode_novm(regs))
15750 goto kernel_trap;
15751
15752 #ifdef CONFIG_X86_32
15753 @@ -157,7 +151,7 @@ trap_signal:
15754 printk_ratelimit()) {
15755 printk(KERN_INFO
15756 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15757 - tsk->comm, tsk->pid, str,
15758 + tsk->comm, task_pid_nr(tsk), str,
15759 regs->ip, regs->sp, error_code);
15760 print_vma_addr(" in ", regs->ip);
15761 printk("\n");
15762 @@ -174,8 +168,20 @@ kernel_trap:
15763 if (!fixup_exception(regs)) {
15764 tsk->thread.error_code = error_code;
15765 tsk->thread.trap_no = trapnr;
15766 +
15767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15768 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15769 + str = "PAX: suspicious stack segment fault";
15770 +#endif
15771 +
15772 die(str, regs, error_code);
15773 }
15774 +
15775 +#ifdef CONFIG_PAX_REFCOUNT
15776 + if (trapnr == 4)
15777 + pax_report_refcount_overflow(regs);
15778 +#endif
15779 +
15780 return;
15781
15782 #ifdef CONFIG_X86_32
15783 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15784 conditional_sti(regs);
15785
15786 #ifdef CONFIG_X86_32
15787 - if (regs->flags & X86_VM_MASK)
15788 + if (v8086_mode(regs))
15789 goto gp_in_vm86;
15790 #endif
15791
15792 tsk = current;
15793 - if (!user_mode(regs))
15794 + if (!user_mode_novm(regs))
15795 goto gp_in_kernel;
15796
15797 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15798 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15799 + struct mm_struct *mm = tsk->mm;
15800 + unsigned long limit;
15801 +
15802 + down_write(&mm->mmap_sem);
15803 + limit = mm->context.user_cs_limit;
15804 + if (limit < TASK_SIZE) {
15805 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15806 + up_write(&mm->mmap_sem);
15807 + return;
15808 + }
15809 + up_write(&mm->mmap_sem);
15810 + }
15811 +#endif
15812 +
15813 tsk->thread.error_code = error_code;
15814 tsk->thread.trap_no = 13;
15815
15816 @@ -304,6 +326,13 @@ gp_in_kernel:
15817 if (notify_die(DIE_GPF, "general protection fault", regs,
15818 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15819 return;
15820 +
15821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15822 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15823 + die("PAX: suspicious general protection fault", regs, error_code);
15824 + else
15825 +#endif
15826 +
15827 die("general protection fault", regs, error_code);
15828 }
15829
15830 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15831 dotraplinkage notrace __kprobes void
15832 do_nmi(struct pt_regs *regs, long error_code)
15833 {
15834 +
15835 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15836 + if (!user_mode(regs)) {
15837 + unsigned long cs = regs->cs & 0xFFFF;
15838 + unsigned long ip = ktva_ktla(regs->ip);
15839 +
15840 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15841 + regs->ip = ip;
15842 + }
15843 +#endif
15844 +
15845 nmi_enter();
15846
15847 inc_irq_stat(__nmi_count);
15848 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15849 /* It's safe to allow irq's after DR6 has been saved */
15850 preempt_conditional_sti(regs);
15851
15852 - if (regs->flags & X86_VM_MASK) {
15853 + if (v8086_mode(regs)) {
15854 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15855 error_code, 1);
15856 preempt_conditional_cli(regs);
15857 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15858 * We already checked v86 mode above, so we can check for kernel mode
15859 * by just checking the CPL of CS.
15860 */
15861 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15862 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15863 tsk->thread.debugreg6 &= ~DR_STEP;
15864 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15865 regs->flags &= ~X86_EFLAGS_TF;
15866 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15867 return;
15868 conditional_sti(regs);
15869
15870 - if (!user_mode_vm(regs))
15871 + if (!user_mode(regs))
15872 {
15873 if (!fixup_exception(regs)) {
15874 task->thread.error_code = error_code;
15875 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15876 void __math_state_restore(void)
15877 {
15878 struct thread_info *thread = current_thread_info();
15879 - struct task_struct *tsk = thread->task;
15880 + struct task_struct *tsk = current;
15881
15882 /*
15883 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15884 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15885 */
15886 asmlinkage void math_state_restore(void)
15887 {
15888 - struct thread_info *thread = current_thread_info();
15889 - struct task_struct *tsk = thread->task;
15890 + struct task_struct *tsk = current;
15891
15892 if (!tsk_used_math(tsk)) {
15893 local_irq_enable();
15894 diff -urNp linux-3.0.7/arch/x86/kernel/verify_cpu.S linux-3.0.7/arch/x86/kernel/verify_cpu.S
15895 --- linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15896 +++ linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15897 @@ -20,6 +20,7 @@
15898 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15899 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15900 * arch/x86/kernel/head_32.S: processor startup
15901 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15902 *
15903 * verify_cpu, returns the status of longmode and SSE in register %eax.
15904 * 0: Success 1: Failure
15905 diff -urNp linux-3.0.7/arch/x86/kernel/vm86_32.c linux-3.0.7/arch/x86/kernel/vm86_32.c
15906 --- linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15907 +++ linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15908 @@ -41,6 +41,7 @@
15909 #include <linux/ptrace.h>
15910 #include <linux/audit.h>
15911 #include <linux/stddef.h>
15912 +#include <linux/grsecurity.h>
15913
15914 #include <asm/uaccess.h>
15915 #include <asm/io.h>
15916 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15917 do_exit(SIGSEGV);
15918 }
15919
15920 - tss = &per_cpu(init_tss, get_cpu());
15921 + tss = init_tss + get_cpu();
15922 current->thread.sp0 = current->thread.saved_sp0;
15923 current->thread.sysenter_cs = __KERNEL_CS;
15924 load_sp0(tss, &current->thread);
15925 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15926 struct task_struct *tsk;
15927 int tmp, ret = -EPERM;
15928
15929 +#ifdef CONFIG_GRKERNSEC_VM86
15930 + if (!capable(CAP_SYS_RAWIO)) {
15931 + gr_handle_vm86();
15932 + goto out;
15933 + }
15934 +#endif
15935 +
15936 tsk = current;
15937 if (tsk->thread.saved_sp0)
15938 goto out;
15939 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15940 int tmp, ret;
15941 struct vm86plus_struct __user *v86;
15942
15943 +#ifdef CONFIG_GRKERNSEC_VM86
15944 + if (!capable(CAP_SYS_RAWIO)) {
15945 + gr_handle_vm86();
15946 + ret = -EPERM;
15947 + goto out;
15948 + }
15949 +#endif
15950 +
15951 tsk = current;
15952 switch (cmd) {
15953 case VM86_REQUEST_IRQ:
15954 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15955 tsk->thread.saved_fs = info->regs32->fs;
15956 tsk->thread.saved_gs = get_user_gs(info->regs32);
15957
15958 - tss = &per_cpu(init_tss, get_cpu());
15959 + tss = init_tss + get_cpu();
15960 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15961 if (cpu_has_sep)
15962 tsk->thread.sysenter_cs = 0;
15963 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15964 goto cannot_handle;
15965 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15966 goto cannot_handle;
15967 - intr_ptr = (unsigned long __user *) (i << 2);
15968 + intr_ptr = (__force unsigned long __user *) (i << 2);
15969 if (get_user(segoffs, intr_ptr))
15970 goto cannot_handle;
15971 if ((segoffs >> 16) == BIOSSEG)
15972 diff -urNp linux-3.0.7/arch/x86/kernel/vmlinux.lds.S linux-3.0.7/arch/x86/kernel/vmlinux.lds.S
15973 --- linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15974 +++ linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15975 @@ -26,6 +26,13 @@
15976 #include <asm/page_types.h>
15977 #include <asm/cache.h>
15978 #include <asm/boot.h>
15979 +#include <asm/segment.h>
15980 +
15981 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15982 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15983 +#else
15984 +#define __KERNEL_TEXT_OFFSET 0
15985 +#endif
15986
15987 #undef i386 /* in case the preprocessor is a 32bit one */
15988
15989 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15990
15991 PHDRS {
15992 text PT_LOAD FLAGS(5); /* R_E */
15993 +#ifdef CONFIG_X86_32
15994 + module PT_LOAD FLAGS(5); /* R_E */
15995 +#endif
15996 +#ifdef CONFIG_XEN
15997 + rodata PT_LOAD FLAGS(5); /* R_E */
15998 +#else
15999 + rodata PT_LOAD FLAGS(4); /* R__ */
16000 +#endif
16001 data PT_LOAD FLAGS(6); /* RW_ */
16002 #ifdef CONFIG_X86_64
16003 user PT_LOAD FLAGS(5); /* R_E */
16004 +#endif
16005 + init.begin PT_LOAD FLAGS(6); /* RW_ */
16006 #ifdef CONFIG_SMP
16007 percpu PT_LOAD FLAGS(6); /* RW_ */
16008 #endif
16009 + text.init PT_LOAD FLAGS(5); /* R_E */
16010 + text.exit PT_LOAD FLAGS(5); /* R_E */
16011 init PT_LOAD FLAGS(7); /* RWE */
16012 -#endif
16013 note PT_NOTE FLAGS(0); /* ___ */
16014 }
16015
16016 SECTIONS
16017 {
16018 #ifdef CONFIG_X86_32
16019 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16020 - phys_startup_32 = startup_32 - LOAD_OFFSET;
16021 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16022 #else
16023 - . = __START_KERNEL;
16024 - phys_startup_64 = startup_64 - LOAD_OFFSET;
16025 + . = __START_KERNEL;
16026 #endif
16027
16028 /* Text and read-only data */
16029 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
16030 - _text = .;
16031 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16032 /* bootstrapping code */
16033 +#ifdef CONFIG_X86_32
16034 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16035 +#else
16036 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16037 +#endif
16038 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16039 + _text = .;
16040 HEAD_TEXT
16041 #ifdef CONFIG_X86_32
16042 . = ALIGN(PAGE_SIZE);
16043 @@ -109,13 +131,47 @@ SECTIONS
16044 IRQENTRY_TEXT
16045 *(.fixup)
16046 *(.gnu.warning)
16047 - /* End of text section */
16048 - _etext = .;
16049 } :text = 0x9090
16050
16051 - NOTES :text :note
16052 + . += __KERNEL_TEXT_OFFSET;
16053 +
16054 +#ifdef CONFIG_X86_32
16055 + . = ALIGN(PAGE_SIZE);
16056 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16057 +
16058 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16059 + MODULES_EXEC_VADDR = .;
16060 + BYTE(0)
16061 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16062 + . = ALIGN(HPAGE_SIZE);
16063 + MODULES_EXEC_END = . - 1;
16064 +#endif
16065 +
16066 + } :module
16067 +#endif
16068 +
16069 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16070 + /* End of text section */
16071 + _etext = . - __KERNEL_TEXT_OFFSET;
16072 + }
16073 +
16074 +#ifdef CONFIG_X86_32
16075 + . = ALIGN(PAGE_SIZE);
16076 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16077 + *(.idt)
16078 + . = ALIGN(PAGE_SIZE);
16079 + *(.empty_zero_page)
16080 + *(.initial_pg_fixmap)
16081 + *(.initial_pg_pmd)
16082 + *(.initial_page_table)
16083 + *(.swapper_pg_dir)
16084 + } :rodata
16085 +#endif
16086 +
16087 + . = ALIGN(PAGE_SIZE);
16088 + NOTES :rodata :note
16089
16090 - EXCEPTION_TABLE(16) :text = 0x9090
16091 + EXCEPTION_TABLE(16) :rodata
16092
16093 #if defined(CONFIG_DEBUG_RODATA)
16094 /* .text should occupy whole number of pages */
16095 @@ -127,16 +183,20 @@ SECTIONS
16096
16097 /* Data */
16098 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16099 +
16100 +#ifdef CONFIG_PAX_KERNEXEC
16101 + . = ALIGN(HPAGE_SIZE);
16102 +#else
16103 + . = ALIGN(PAGE_SIZE);
16104 +#endif
16105 +
16106 /* Start of data section */
16107 _sdata = .;
16108
16109 /* init_task */
16110 INIT_TASK_DATA(THREAD_SIZE)
16111
16112 -#ifdef CONFIG_X86_32
16113 - /* 32 bit has nosave before _edata */
16114 NOSAVE_DATA
16115 -#endif
16116
16117 PAGE_ALIGNED_DATA(PAGE_SIZE)
16118
16119 @@ -208,12 +268,19 @@ SECTIONS
16120 #endif /* CONFIG_X86_64 */
16121
16122 /* Init code and data - will be freed after init */
16123 - . = ALIGN(PAGE_SIZE);
16124 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16125 + BYTE(0)
16126 +
16127 +#ifdef CONFIG_PAX_KERNEXEC
16128 + . = ALIGN(HPAGE_SIZE);
16129 +#else
16130 + . = ALIGN(PAGE_SIZE);
16131 +#endif
16132 +
16133 __init_begin = .; /* paired with __init_end */
16134 - }
16135 + } :init.begin
16136
16137 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16138 +#ifdef CONFIG_SMP
16139 /*
16140 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16141 * output PHDR, so the next output section - .init.text - should
16142 @@ -222,12 +289,27 @@ SECTIONS
16143 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16144 #endif
16145
16146 - INIT_TEXT_SECTION(PAGE_SIZE)
16147 -#ifdef CONFIG_X86_64
16148 - :init
16149 -#endif
16150 + . = ALIGN(PAGE_SIZE);
16151 + init_begin = .;
16152 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16153 + VMLINUX_SYMBOL(_sinittext) = .;
16154 + INIT_TEXT
16155 + VMLINUX_SYMBOL(_einittext) = .;
16156 + . = ALIGN(PAGE_SIZE);
16157 + } :text.init
16158
16159 - INIT_DATA_SECTION(16)
16160 + /*
16161 + * .exit.text is discard at runtime, not link time, to deal with
16162 + * references from .altinstructions and .eh_frame
16163 + */
16164 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16165 + EXIT_TEXT
16166 + . = ALIGN(16);
16167 + } :text.exit
16168 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16169 +
16170 + . = ALIGN(PAGE_SIZE);
16171 + INIT_DATA_SECTION(16) :init
16172
16173 /*
16174 * Code and data for a variety of lowlevel trampolines, to be
16175 @@ -301,19 +383,12 @@ SECTIONS
16176 }
16177
16178 . = ALIGN(8);
16179 - /*
16180 - * .exit.text is discard at runtime, not link time, to deal with
16181 - * references from .altinstructions and .eh_frame
16182 - */
16183 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16184 - EXIT_TEXT
16185 - }
16186
16187 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16188 EXIT_DATA
16189 }
16190
16191 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16192 +#ifndef CONFIG_SMP
16193 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16194 #endif
16195
16196 @@ -332,16 +407,10 @@ SECTIONS
16197 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16198 __smp_locks = .;
16199 *(.smp_locks)
16200 - . = ALIGN(PAGE_SIZE);
16201 __smp_locks_end = .;
16202 + . = ALIGN(PAGE_SIZE);
16203 }
16204
16205 -#ifdef CONFIG_X86_64
16206 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16207 - NOSAVE_DATA
16208 - }
16209 -#endif
16210 -
16211 /* BSS */
16212 . = ALIGN(PAGE_SIZE);
16213 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16214 @@ -357,6 +426,7 @@ SECTIONS
16215 __brk_base = .;
16216 . += 64 * 1024; /* 64k alignment slop space */
16217 *(.brk_reservation) /* areas brk users have reserved */
16218 + . = ALIGN(HPAGE_SIZE);
16219 __brk_limit = .;
16220 }
16221
16222 @@ -383,13 +453,12 @@ SECTIONS
16223 * for the boot processor.
16224 */
16225 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16226 -INIT_PER_CPU(gdt_page);
16227 INIT_PER_CPU(irq_stack_union);
16228
16229 /*
16230 * Build-time check on the image size:
16231 */
16232 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16233 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16234 "kernel image bigger than KERNEL_IMAGE_SIZE");
16235
16236 #ifdef CONFIG_SMP
16237 diff -urNp linux-3.0.7/arch/x86/kernel/vsyscall_64.c linux-3.0.7/arch/x86/kernel/vsyscall_64.c
16238 --- linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16239 +++ linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16240 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16241 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16242 {
16243 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16244 - .sysctl_enabled = 1,
16245 + .sysctl_enabled = 0,
16246 };
16247
16248 void update_vsyscall_tz(void)
16249 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16250 static ctl_table kernel_table2[] = {
16251 { .procname = "vsyscall64",
16252 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16253 - .mode = 0644,
16254 + .mode = 0444,
16255 .proc_handler = proc_dointvec },
16256 {}
16257 };
16258 diff -urNp linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c
16259 --- linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16260 +++ linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16261 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16262 EXPORT_SYMBOL(copy_user_generic_string);
16263 EXPORT_SYMBOL(copy_user_generic_unrolled);
16264 EXPORT_SYMBOL(__copy_user_nocache);
16265 -EXPORT_SYMBOL(_copy_from_user);
16266 -EXPORT_SYMBOL(_copy_to_user);
16267
16268 EXPORT_SYMBOL(copy_page);
16269 EXPORT_SYMBOL(clear_page);
16270 diff -urNp linux-3.0.7/arch/x86/kernel/xsave.c linux-3.0.7/arch/x86/kernel/xsave.c
16271 --- linux-3.0.7/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16272 +++ linux-3.0.7/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16273 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16274 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16275 return -EINVAL;
16276
16277 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16278 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16279 fx_sw_user->extended_size -
16280 FP_XSTATE_MAGIC2_SIZE));
16281 if (err)
16282 @@ -267,7 +267,7 @@ fx_only:
16283 * the other extended state.
16284 */
16285 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16286 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16287 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16288 }
16289
16290 /*
16291 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16292 if (use_xsave())
16293 err = restore_user_xstate(buf);
16294 else
16295 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
16296 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16297 buf);
16298 if (unlikely(err)) {
16299 /*
16300 diff -urNp linux-3.0.7/arch/x86/kvm/emulate.c linux-3.0.7/arch/x86/kvm/emulate.c
16301 --- linux-3.0.7/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16302 +++ linux-3.0.7/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16303 @@ -96,7 +96,7 @@
16304 #define Src2ImmByte (2<<29)
16305 #define Src2One (3<<29)
16306 #define Src2Imm (4<<29)
16307 -#define Src2Mask (7<<29)
16308 +#define Src2Mask (7U<<29)
16309
16310 #define X2(x...) x, x
16311 #define X3(x...) X2(x), x
16312 @@ -207,6 +207,7 @@ struct gprefix {
16313
16314 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16315 do { \
16316 + unsigned long _tmp; \
16317 __asm__ __volatile__ ( \
16318 _PRE_EFLAGS("0", "4", "2") \
16319 _op _suffix " %"_x"3,%1; " \
16320 @@ -220,8 +221,6 @@ struct gprefix {
16321 /* Raw emulation: instruction has two explicit operands. */
16322 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16323 do { \
16324 - unsigned long _tmp; \
16325 - \
16326 switch ((_dst).bytes) { \
16327 case 2: \
16328 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16329 @@ -237,7 +236,6 @@ struct gprefix {
16330
16331 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16332 do { \
16333 - unsigned long _tmp; \
16334 switch ((_dst).bytes) { \
16335 case 1: \
16336 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16337 diff -urNp linux-3.0.7/arch/x86/kvm/lapic.c linux-3.0.7/arch/x86/kvm/lapic.c
16338 --- linux-3.0.7/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16339 +++ linux-3.0.7/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16340 @@ -53,7 +53,7 @@
16341 #define APIC_BUS_CYCLE_NS 1
16342
16343 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16344 -#define apic_debug(fmt, arg...)
16345 +#define apic_debug(fmt, arg...) do {} while (0)
16346
16347 #define APIC_LVT_NUM 6
16348 /* 14 is the version for Xeon and Pentium 8.4.8*/
16349 diff -urNp linux-3.0.7/arch/x86/kvm/mmu.c linux-3.0.7/arch/x86/kvm/mmu.c
16350 --- linux-3.0.7/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16351 +++ linux-3.0.7/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16352 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16353
16354 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16355
16356 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16357 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16358
16359 /*
16360 * Assume that the pte write on a page table of the same type
16361 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16362 }
16363
16364 spin_lock(&vcpu->kvm->mmu_lock);
16365 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16366 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16367 gentry = 0;
16368 kvm_mmu_free_some_pages(vcpu);
16369 ++vcpu->kvm->stat.mmu_pte_write;
16370 diff -urNp linux-3.0.7/arch/x86/kvm/paging_tmpl.h linux-3.0.7/arch/x86/kvm/paging_tmpl.h
16371 --- linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16372 +++ linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16373 @@ -182,7 +182,7 @@ walk:
16374 break;
16375 }
16376
16377 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16378 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16379 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16380 present = false;
16381 break;
16382 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16383 unsigned long mmu_seq;
16384 bool map_writable;
16385
16386 + pax_track_stack();
16387 +
16388 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16389
16390 r = mmu_topup_memory_caches(vcpu);
16391 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16392 if (need_flush)
16393 kvm_flush_remote_tlbs(vcpu->kvm);
16394
16395 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16396 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16397
16398 spin_unlock(&vcpu->kvm->mmu_lock);
16399
16400 diff -urNp linux-3.0.7/arch/x86/kvm/svm.c linux-3.0.7/arch/x86/kvm/svm.c
16401 --- linux-3.0.7/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16402 +++ linux-3.0.7/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16403 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16404 int cpu = raw_smp_processor_id();
16405
16406 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16407 +
16408 + pax_open_kernel();
16409 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16410 + pax_close_kernel();
16411 +
16412 load_TR_desc();
16413 }
16414
16415 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16416 #endif
16417 #endif
16418
16419 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16420 + __set_fs(current_thread_info()->addr_limit);
16421 +#endif
16422 +
16423 reload_tss(vcpu);
16424
16425 local_irq_disable();
16426 diff -urNp linux-3.0.7/arch/x86/kvm/vmx.c linux-3.0.7/arch/x86/kvm/vmx.c
16427 --- linux-3.0.7/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16428 +++ linux-3.0.7/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16429 @@ -797,7 +797,11 @@ static void reload_tss(void)
16430 struct desc_struct *descs;
16431
16432 descs = (void *)gdt->address;
16433 +
16434 + pax_open_kernel();
16435 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16436 + pax_close_kernel();
16437 +
16438 load_TR_desc();
16439 }
16440
16441 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16442 if (!cpu_has_vmx_flexpriority())
16443 flexpriority_enabled = 0;
16444
16445 - if (!cpu_has_vmx_tpr_shadow())
16446 - kvm_x86_ops->update_cr8_intercept = NULL;
16447 + if (!cpu_has_vmx_tpr_shadow()) {
16448 + pax_open_kernel();
16449 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16450 + pax_close_kernel();
16451 + }
16452
16453 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16454 kvm_disable_largepages();
16455 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16456 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16457
16458 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16459 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16460 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16461 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16462 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16463 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16464 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16465 "jmp .Lkvm_vmx_return \n\t"
16466 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16467 ".Lkvm_vmx_return: "
16468 +
16469 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16470 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16471 + ".Lkvm_vmx_return2: "
16472 +#endif
16473 +
16474 /* Save guest registers, load host registers, keep flags */
16475 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16476 "pop %0 \n\t"
16477 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16478 #endif
16479 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16480 [wordsize]"i"(sizeof(ulong))
16481 +
16482 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16483 + ,[cs]"i"(__KERNEL_CS)
16484 +#endif
16485 +
16486 : "cc", "memory"
16487 , R"ax", R"bx", R"di", R"si"
16488 #ifdef CONFIG_X86_64
16489 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16490
16491 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16492
16493 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16494 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16495 +
16496 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16497 + loadsegment(fs, __KERNEL_PERCPU);
16498 +#endif
16499 +
16500 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16501 + __set_fs(current_thread_info()->addr_limit);
16502 +#endif
16503 +
16504 vmx->launched = 1;
16505
16506 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16507 diff -urNp linux-3.0.7/arch/x86/kvm/x86.c linux-3.0.7/arch/x86/kvm/x86.c
16508 --- linux-3.0.7/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16509 +++ linux-3.0.7/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16510 @@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16511 {
16512 struct kvm *kvm = vcpu->kvm;
16513 int lm = is_long_mode(vcpu);
16514 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16515 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16516 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16517 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16518 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16519 : kvm->arch.xen_hvm_config.blob_size_32;
16520 u32 page_num = data & ~PAGE_MASK;
16521 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16522 if (n < msr_list.nmsrs)
16523 goto out;
16524 r = -EFAULT;
16525 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16526 + goto out;
16527 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16528 num_msrs_to_save * sizeof(u32)))
16529 goto out;
16530 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16531 struct kvm_cpuid2 *cpuid,
16532 struct kvm_cpuid_entry2 __user *entries)
16533 {
16534 - int r;
16535 + int r, i;
16536
16537 r = -E2BIG;
16538 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16539 goto out;
16540 r = -EFAULT;
16541 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16542 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16543 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16544 goto out;
16545 + for (i = 0; i < cpuid->nent; ++i) {
16546 + struct kvm_cpuid_entry2 cpuid_entry;
16547 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16548 + goto out;
16549 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16550 + }
16551 vcpu->arch.cpuid_nent = cpuid->nent;
16552 kvm_apic_set_version(vcpu);
16553 kvm_x86_ops->cpuid_update(vcpu);
16554 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16555 struct kvm_cpuid2 *cpuid,
16556 struct kvm_cpuid_entry2 __user *entries)
16557 {
16558 - int r;
16559 + int r, i;
16560
16561 r = -E2BIG;
16562 if (cpuid->nent < vcpu->arch.cpuid_nent)
16563 goto out;
16564 r = -EFAULT;
16565 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16566 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16567 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16568 goto out;
16569 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16570 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16571 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16572 + goto out;
16573 + }
16574 return 0;
16575
16576 out:
16577 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16578 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16579 struct kvm_interrupt *irq)
16580 {
16581 - if (irq->irq < 0 || irq->irq >= 256)
16582 + if (irq->irq >= 256)
16583 return -EINVAL;
16584 if (irqchip_in_kernel(vcpu->kvm))
16585 return -ENXIO;
16586 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16587 }
16588 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16589
16590 -int kvm_arch_init(void *opaque)
16591 +int kvm_arch_init(const void *opaque)
16592 {
16593 int r;
16594 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16595 diff -urNp linux-3.0.7/arch/x86/lguest/boot.c linux-3.0.7/arch/x86/lguest/boot.c
16596 --- linux-3.0.7/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16597 +++ linux-3.0.7/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16598 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16599 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16600 * Launcher to reboot us.
16601 */
16602 -static void lguest_restart(char *reason)
16603 +static __noreturn void lguest_restart(char *reason)
16604 {
16605 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16606 + BUG();
16607 }
16608
16609 /*G:050
16610 diff -urNp linux-3.0.7/arch/x86/lib/atomic64_32.c linux-3.0.7/arch/x86/lib/atomic64_32.c
16611 --- linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16612 +++ linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16613 @@ -8,18 +8,30 @@
16614
16615 long long atomic64_read_cx8(long long, const atomic64_t *v);
16616 EXPORT_SYMBOL(atomic64_read_cx8);
16617 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16618 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16619 long long atomic64_set_cx8(long long, const atomic64_t *v);
16620 EXPORT_SYMBOL(atomic64_set_cx8);
16621 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16622 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16623 long long atomic64_xchg_cx8(long long, unsigned high);
16624 EXPORT_SYMBOL(atomic64_xchg_cx8);
16625 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16626 EXPORT_SYMBOL(atomic64_add_return_cx8);
16627 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16628 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16629 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16630 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16631 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16632 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16633 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16634 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16635 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16636 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16637 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16638 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16639 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16640 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16641 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16642 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16643 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16644 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16645 #ifndef CONFIG_X86_CMPXCHG64
16646 long long atomic64_read_386(long long, const atomic64_t *v);
16647 EXPORT_SYMBOL(atomic64_read_386);
16648 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16649 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16650 long long atomic64_set_386(long long, const atomic64_t *v);
16651 EXPORT_SYMBOL(atomic64_set_386);
16652 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16653 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16654 long long atomic64_xchg_386(long long, unsigned high);
16655 EXPORT_SYMBOL(atomic64_xchg_386);
16656 long long atomic64_add_return_386(long long a, atomic64_t *v);
16657 EXPORT_SYMBOL(atomic64_add_return_386);
16658 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16659 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16660 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16661 EXPORT_SYMBOL(atomic64_sub_return_386);
16662 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16663 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16664 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16665 EXPORT_SYMBOL(atomic64_inc_return_386);
16666 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16667 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16668 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16669 EXPORT_SYMBOL(atomic64_dec_return_386);
16670 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16671 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16672 long long atomic64_add_386(long long a, atomic64_t *v);
16673 EXPORT_SYMBOL(atomic64_add_386);
16674 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16675 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16676 long long atomic64_sub_386(long long a, atomic64_t *v);
16677 EXPORT_SYMBOL(atomic64_sub_386);
16678 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16679 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16680 long long atomic64_inc_386(long long a, atomic64_t *v);
16681 EXPORT_SYMBOL(atomic64_inc_386);
16682 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16683 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16684 long long atomic64_dec_386(long long a, atomic64_t *v);
16685 EXPORT_SYMBOL(atomic64_dec_386);
16686 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16687 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16688 long long atomic64_dec_if_positive_386(atomic64_t *v);
16689 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16690 int atomic64_inc_not_zero_386(atomic64_t *v);
16691 diff -urNp linux-3.0.7/arch/x86/lib/atomic64_386_32.S linux-3.0.7/arch/x86/lib/atomic64_386_32.S
16692 --- linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16693 +++ linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16694 @@ -48,6 +48,10 @@ BEGIN(read)
16695 movl (v), %eax
16696 movl 4(v), %edx
16697 RET_ENDP
16698 +BEGIN(read_unchecked)
16699 + movl (v), %eax
16700 + movl 4(v), %edx
16701 +RET_ENDP
16702 #undef v
16703
16704 #define v %esi
16705 @@ -55,6 +59,10 @@ BEGIN(set)
16706 movl %ebx, (v)
16707 movl %ecx, 4(v)
16708 RET_ENDP
16709 +BEGIN(set_unchecked)
16710 + movl %ebx, (v)
16711 + movl %ecx, 4(v)
16712 +RET_ENDP
16713 #undef v
16714
16715 #define v %esi
16716 @@ -70,6 +78,20 @@ RET_ENDP
16717 BEGIN(add)
16718 addl %eax, (v)
16719 adcl %edx, 4(v)
16720 +
16721 +#ifdef CONFIG_PAX_REFCOUNT
16722 + jno 0f
16723 + subl %eax, (v)
16724 + sbbl %edx, 4(v)
16725 + int $4
16726 +0:
16727 + _ASM_EXTABLE(0b, 0b)
16728 +#endif
16729 +
16730 +RET_ENDP
16731 +BEGIN(add_unchecked)
16732 + addl %eax, (v)
16733 + adcl %edx, 4(v)
16734 RET_ENDP
16735 #undef v
16736
16737 @@ -77,6 +99,24 @@ RET_ENDP
16738 BEGIN(add_return)
16739 addl (v), %eax
16740 adcl 4(v), %edx
16741 +
16742 +#ifdef CONFIG_PAX_REFCOUNT
16743 + into
16744 +1234:
16745 + _ASM_EXTABLE(1234b, 2f)
16746 +#endif
16747 +
16748 + movl %eax, (v)
16749 + movl %edx, 4(v)
16750 +
16751 +#ifdef CONFIG_PAX_REFCOUNT
16752 +2:
16753 +#endif
16754 +
16755 +RET_ENDP
16756 +BEGIN(add_return_unchecked)
16757 + addl (v), %eax
16758 + adcl 4(v), %edx
16759 movl %eax, (v)
16760 movl %edx, 4(v)
16761 RET_ENDP
16762 @@ -86,6 +126,20 @@ RET_ENDP
16763 BEGIN(sub)
16764 subl %eax, (v)
16765 sbbl %edx, 4(v)
16766 +
16767 +#ifdef CONFIG_PAX_REFCOUNT
16768 + jno 0f
16769 + addl %eax, (v)
16770 + adcl %edx, 4(v)
16771 + int $4
16772 +0:
16773 + _ASM_EXTABLE(0b, 0b)
16774 +#endif
16775 +
16776 +RET_ENDP
16777 +BEGIN(sub_unchecked)
16778 + subl %eax, (v)
16779 + sbbl %edx, 4(v)
16780 RET_ENDP
16781 #undef v
16782
16783 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16784 sbbl $0, %edx
16785 addl (v), %eax
16786 adcl 4(v), %edx
16787 +
16788 +#ifdef CONFIG_PAX_REFCOUNT
16789 + into
16790 +1234:
16791 + _ASM_EXTABLE(1234b, 2f)
16792 +#endif
16793 +
16794 + movl %eax, (v)
16795 + movl %edx, 4(v)
16796 +
16797 +#ifdef CONFIG_PAX_REFCOUNT
16798 +2:
16799 +#endif
16800 +
16801 +RET_ENDP
16802 +BEGIN(sub_return_unchecked)
16803 + negl %edx
16804 + negl %eax
16805 + sbbl $0, %edx
16806 + addl (v), %eax
16807 + adcl 4(v), %edx
16808 movl %eax, (v)
16809 movl %edx, 4(v)
16810 RET_ENDP
16811 @@ -105,6 +180,20 @@ RET_ENDP
16812 BEGIN(inc)
16813 addl $1, (v)
16814 adcl $0, 4(v)
16815 +
16816 +#ifdef CONFIG_PAX_REFCOUNT
16817 + jno 0f
16818 + subl $1, (v)
16819 + sbbl $0, 4(v)
16820 + int $4
16821 +0:
16822 + _ASM_EXTABLE(0b, 0b)
16823 +#endif
16824 +
16825 +RET_ENDP
16826 +BEGIN(inc_unchecked)
16827 + addl $1, (v)
16828 + adcl $0, 4(v)
16829 RET_ENDP
16830 #undef v
16831
16832 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16833 movl 4(v), %edx
16834 addl $1, %eax
16835 adcl $0, %edx
16836 +
16837 +#ifdef CONFIG_PAX_REFCOUNT
16838 + into
16839 +1234:
16840 + _ASM_EXTABLE(1234b, 2f)
16841 +#endif
16842 +
16843 + movl %eax, (v)
16844 + movl %edx, 4(v)
16845 +
16846 +#ifdef CONFIG_PAX_REFCOUNT
16847 +2:
16848 +#endif
16849 +
16850 +RET_ENDP
16851 +BEGIN(inc_return_unchecked)
16852 + movl (v), %eax
16853 + movl 4(v), %edx
16854 + addl $1, %eax
16855 + adcl $0, %edx
16856 movl %eax, (v)
16857 movl %edx, 4(v)
16858 RET_ENDP
16859 @@ -123,6 +232,20 @@ RET_ENDP
16860 BEGIN(dec)
16861 subl $1, (v)
16862 sbbl $0, 4(v)
16863 +
16864 +#ifdef CONFIG_PAX_REFCOUNT
16865 + jno 0f
16866 + addl $1, (v)
16867 + adcl $0, 4(v)
16868 + int $4
16869 +0:
16870 + _ASM_EXTABLE(0b, 0b)
16871 +#endif
16872 +
16873 +RET_ENDP
16874 +BEGIN(dec_unchecked)
16875 + subl $1, (v)
16876 + sbbl $0, 4(v)
16877 RET_ENDP
16878 #undef v
16879
16880 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16881 movl 4(v), %edx
16882 subl $1, %eax
16883 sbbl $0, %edx
16884 +
16885 +#ifdef CONFIG_PAX_REFCOUNT
16886 + into
16887 +1234:
16888 + _ASM_EXTABLE(1234b, 2f)
16889 +#endif
16890 +
16891 + movl %eax, (v)
16892 + movl %edx, 4(v)
16893 +
16894 +#ifdef CONFIG_PAX_REFCOUNT
16895 +2:
16896 +#endif
16897 +
16898 +RET_ENDP
16899 +BEGIN(dec_return_unchecked)
16900 + movl (v), %eax
16901 + movl 4(v), %edx
16902 + subl $1, %eax
16903 + sbbl $0, %edx
16904 movl %eax, (v)
16905 movl %edx, 4(v)
16906 RET_ENDP
16907 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16908 adcl %edx, %edi
16909 addl (v), %eax
16910 adcl 4(v), %edx
16911 +
16912 +#ifdef CONFIG_PAX_REFCOUNT
16913 + into
16914 +1234:
16915 + _ASM_EXTABLE(1234b, 2f)
16916 +#endif
16917 +
16918 cmpl %eax, %esi
16919 je 3f
16920 1:
16921 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16922 1:
16923 addl $1, %eax
16924 adcl $0, %edx
16925 +
16926 +#ifdef CONFIG_PAX_REFCOUNT
16927 + into
16928 +1234:
16929 + _ASM_EXTABLE(1234b, 2f)
16930 +#endif
16931 +
16932 movl %eax, (v)
16933 movl %edx, 4(v)
16934 movl $1, %eax
16935 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16936 movl 4(v), %edx
16937 subl $1, %eax
16938 sbbl $0, %edx
16939 +
16940 +#ifdef CONFIG_PAX_REFCOUNT
16941 + into
16942 +1234:
16943 + _ASM_EXTABLE(1234b, 1f)
16944 +#endif
16945 +
16946 js 1f
16947 movl %eax, (v)
16948 movl %edx, 4(v)
16949 diff -urNp linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S
16950 --- linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16951 +++ linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
16952 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
16953 CFI_STARTPROC
16954
16955 read64 %ecx
16956 + pax_force_retaddr
16957 ret
16958 CFI_ENDPROC
16959 ENDPROC(atomic64_read_cx8)
16960
16961 +ENTRY(atomic64_read_unchecked_cx8)
16962 + CFI_STARTPROC
16963 +
16964 + read64 %ecx
16965 + pax_force_retaddr
16966 + ret
16967 + CFI_ENDPROC
16968 +ENDPROC(atomic64_read_unchecked_cx8)
16969 +
16970 ENTRY(atomic64_set_cx8)
16971 CFI_STARTPROC
16972
16973 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
16974 cmpxchg8b (%esi)
16975 jne 1b
16976
16977 + pax_force_retaddr
16978 ret
16979 CFI_ENDPROC
16980 ENDPROC(atomic64_set_cx8)
16981
16982 +ENTRY(atomic64_set_unchecked_cx8)
16983 + CFI_STARTPROC
16984 +
16985 +1:
16986 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16987 + * are atomic on 586 and newer */
16988 + cmpxchg8b (%esi)
16989 + jne 1b
16990 +
16991 + pax_force_retaddr
16992 + ret
16993 + CFI_ENDPROC
16994 +ENDPROC(atomic64_set_unchecked_cx8)
16995 +
16996 ENTRY(atomic64_xchg_cx8)
16997 CFI_STARTPROC
16998
16999 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17000 cmpxchg8b (%esi)
17001 jne 1b
17002
17003 + pax_force_retaddr
17004 ret
17005 CFI_ENDPROC
17006 ENDPROC(atomic64_xchg_cx8)
17007
17008 -.macro addsub_return func ins insc
17009 -ENTRY(atomic64_\func\()_return_cx8)
17010 +.macro addsub_return func ins insc unchecked=""
17011 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17012 CFI_STARTPROC
17013 SAVE ebp
17014 SAVE ebx
17015 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17016 movl %edx, %ecx
17017 \ins\()l %esi, %ebx
17018 \insc\()l %edi, %ecx
17019 +
17020 +.ifb \unchecked
17021 +#ifdef CONFIG_PAX_REFCOUNT
17022 + into
17023 +2:
17024 + _ASM_EXTABLE(2b, 3f)
17025 +#endif
17026 +.endif
17027 +
17028 LOCK_PREFIX
17029 cmpxchg8b (%ebp)
17030 jne 1b
17031 -
17032 -10:
17033 movl %ebx, %eax
17034 movl %ecx, %edx
17035 +
17036 +.ifb \unchecked
17037 +#ifdef CONFIG_PAX_REFCOUNT
17038 +3:
17039 +#endif
17040 +.endif
17041 +
17042 RESTORE edi
17043 RESTORE esi
17044 RESTORE ebx
17045 RESTORE ebp
17046 + pax_force_retaddr
17047 ret
17048 CFI_ENDPROC
17049 -ENDPROC(atomic64_\func\()_return_cx8)
17050 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17051 .endm
17052
17053 addsub_return add add adc
17054 addsub_return sub sub sbb
17055 +addsub_return add add adc _unchecked
17056 +addsub_return sub sub sbb _unchecked
17057
17058 -.macro incdec_return func ins insc
17059 -ENTRY(atomic64_\func\()_return_cx8)
17060 +.macro incdec_return func ins insc unchecked
17061 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17062 CFI_STARTPROC
17063 SAVE ebx
17064
17065 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17066 movl %edx, %ecx
17067 \ins\()l $1, %ebx
17068 \insc\()l $0, %ecx
17069 +
17070 +.ifb \unchecked
17071 +#ifdef CONFIG_PAX_REFCOUNT
17072 + into
17073 +2:
17074 + _ASM_EXTABLE(2b, 3f)
17075 +#endif
17076 +.endif
17077 +
17078 LOCK_PREFIX
17079 cmpxchg8b (%esi)
17080 jne 1b
17081
17082 -10:
17083 movl %ebx, %eax
17084 movl %ecx, %edx
17085 +
17086 +.ifb \unchecked
17087 +#ifdef CONFIG_PAX_REFCOUNT
17088 +3:
17089 +#endif
17090 +.endif
17091 +
17092 RESTORE ebx
17093 + pax_force_retaddr
17094 ret
17095 CFI_ENDPROC
17096 -ENDPROC(atomic64_\func\()_return_cx8)
17097 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17098 .endm
17099
17100 incdec_return inc add adc
17101 incdec_return dec sub sbb
17102 +incdec_return inc add adc _unchecked
17103 +incdec_return dec sub sbb _unchecked
17104
17105 ENTRY(atomic64_dec_if_positive_cx8)
17106 CFI_STARTPROC
17107 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17108 movl %edx, %ecx
17109 subl $1, %ebx
17110 sbb $0, %ecx
17111 +
17112 +#ifdef CONFIG_PAX_REFCOUNT
17113 + into
17114 +1234:
17115 + _ASM_EXTABLE(1234b, 2f)
17116 +#endif
17117 +
17118 js 2f
17119 LOCK_PREFIX
17120 cmpxchg8b (%esi)
17121 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17122 movl %ebx, %eax
17123 movl %ecx, %edx
17124 RESTORE ebx
17125 + pax_force_retaddr
17126 ret
17127 CFI_ENDPROC
17128 ENDPROC(atomic64_dec_if_positive_cx8)
17129 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17130 movl %edx, %ecx
17131 addl %esi, %ebx
17132 adcl %edi, %ecx
17133 +
17134 +#ifdef CONFIG_PAX_REFCOUNT
17135 + into
17136 +1234:
17137 + _ASM_EXTABLE(1234b, 3f)
17138 +#endif
17139 +
17140 LOCK_PREFIX
17141 cmpxchg8b (%ebp)
17142 jne 1b
17143 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17144 CFI_ADJUST_CFA_OFFSET -8
17145 RESTORE ebx
17146 RESTORE ebp
17147 + pax_force_retaddr
17148 ret
17149 4:
17150 cmpl %edx, 4(%esp)
17151 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17152 movl %edx, %ecx
17153 addl $1, %ebx
17154 adcl $0, %ecx
17155 +
17156 +#ifdef CONFIG_PAX_REFCOUNT
17157 + into
17158 +1234:
17159 + _ASM_EXTABLE(1234b, 3f)
17160 +#endif
17161 +
17162 LOCK_PREFIX
17163 cmpxchg8b (%esi)
17164 jne 1b
17165 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17166 movl $1, %eax
17167 3:
17168 RESTORE ebx
17169 + pax_force_retaddr
17170 ret
17171 4:
17172 testl %edx, %edx
17173 diff -urNp linux-3.0.7/arch/x86/lib/checksum_32.S linux-3.0.7/arch/x86/lib/checksum_32.S
17174 --- linux-3.0.7/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17175 +++ linux-3.0.7/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17176 @@ -28,7 +28,8 @@
17177 #include <linux/linkage.h>
17178 #include <asm/dwarf2.h>
17179 #include <asm/errno.h>
17180 -
17181 +#include <asm/segment.h>
17182 +
17183 /*
17184 * computes a partial checksum, e.g. for TCP/UDP fragments
17185 */
17186 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17187
17188 #define ARGBASE 16
17189 #define FP 12
17190 -
17191 -ENTRY(csum_partial_copy_generic)
17192 +
17193 +ENTRY(csum_partial_copy_generic_to_user)
17194 CFI_STARTPROC
17195 +
17196 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17197 + pushl_cfi %gs
17198 + popl_cfi %es
17199 + jmp csum_partial_copy_generic
17200 +#endif
17201 +
17202 +ENTRY(csum_partial_copy_generic_from_user)
17203 +
17204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17205 + pushl_cfi %gs
17206 + popl_cfi %ds
17207 +#endif
17208 +
17209 +ENTRY(csum_partial_copy_generic)
17210 subl $4,%esp
17211 CFI_ADJUST_CFA_OFFSET 4
17212 pushl_cfi %edi
17213 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17214 jmp 4f
17215 SRC(1: movw (%esi), %bx )
17216 addl $2, %esi
17217 -DST( movw %bx, (%edi) )
17218 +DST( movw %bx, %es:(%edi) )
17219 addl $2, %edi
17220 addw %bx, %ax
17221 adcl $0, %eax
17222 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17223 SRC(1: movl (%esi), %ebx )
17224 SRC( movl 4(%esi), %edx )
17225 adcl %ebx, %eax
17226 -DST( movl %ebx, (%edi) )
17227 +DST( movl %ebx, %es:(%edi) )
17228 adcl %edx, %eax
17229 -DST( movl %edx, 4(%edi) )
17230 +DST( movl %edx, %es:4(%edi) )
17231
17232 SRC( movl 8(%esi), %ebx )
17233 SRC( movl 12(%esi), %edx )
17234 adcl %ebx, %eax
17235 -DST( movl %ebx, 8(%edi) )
17236 +DST( movl %ebx, %es:8(%edi) )
17237 adcl %edx, %eax
17238 -DST( movl %edx, 12(%edi) )
17239 +DST( movl %edx, %es:12(%edi) )
17240
17241 SRC( movl 16(%esi), %ebx )
17242 SRC( movl 20(%esi), %edx )
17243 adcl %ebx, %eax
17244 -DST( movl %ebx, 16(%edi) )
17245 +DST( movl %ebx, %es:16(%edi) )
17246 adcl %edx, %eax
17247 -DST( movl %edx, 20(%edi) )
17248 +DST( movl %edx, %es:20(%edi) )
17249
17250 SRC( movl 24(%esi), %ebx )
17251 SRC( movl 28(%esi), %edx )
17252 adcl %ebx, %eax
17253 -DST( movl %ebx, 24(%edi) )
17254 +DST( movl %ebx, %es:24(%edi) )
17255 adcl %edx, %eax
17256 -DST( movl %edx, 28(%edi) )
17257 +DST( movl %edx, %es:28(%edi) )
17258
17259 lea 32(%esi), %esi
17260 lea 32(%edi), %edi
17261 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17262 shrl $2, %edx # This clears CF
17263 SRC(3: movl (%esi), %ebx )
17264 adcl %ebx, %eax
17265 -DST( movl %ebx, (%edi) )
17266 +DST( movl %ebx, %es:(%edi) )
17267 lea 4(%esi), %esi
17268 lea 4(%edi), %edi
17269 dec %edx
17270 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17271 jb 5f
17272 SRC( movw (%esi), %cx )
17273 leal 2(%esi), %esi
17274 -DST( movw %cx, (%edi) )
17275 +DST( movw %cx, %es:(%edi) )
17276 leal 2(%edi), %edi
17277 je 6f
17278 shll $16,%ecx
17279 SRC(5: movb (%esi), %cl )
17280 -DST( movb %cl, (%edi) )
17281 +DST( movb %cl, %es:(%edi) )
17282 6: addl %ecx, %eax
17283 adcl $0, %eax
17284 7:
17285 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17286
17287 6001:
17288 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17289 - movl $-EFAULT, (%ebx)
17290 + movl $-EFAULT, %ss:(%ebx)
17291
17292 # zero the complete destination - computing the rest
17293 # is too much work
17294 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17295
17296 6002:
17297 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17298 - movl $-EFAULT,(%ebx)
17299 + movl $-EFAULT,%ss:(%ebx)
17300 jmp 5000b
17301
17302 .previous
17303
17304 + pushl_cfi %ss
17305 + popl_cfi %ds
17306 + pushl_cfi %ss
17307 + popl_cfi %es
17308 popl_cfi %ebx
17309 CFI_RESTORE ebx
17310 popl_cfi %esi
17311 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17312 popl_cfi %ecx # equivalent to addl $4,%esp
17313 ret
17314 CFI_ENDPROC
17315 -ENDPROC(csum_partial_copy_generic)
17316 +ENDPROC(csum_partial_copy_generic_to_user)
17317
17318 #else
17319
17320 /* Version for PentiumII/PPro */
17321
17322 #define ROUND1(x) \
17323 + nop; nop; nop; \
17324 SRC(movl x(%esi), %ebx ) ; \
17325 addl %ebx, %eax ; \
17326 - DST(movl %ebx, x(%edi) ) ;
17327 + DST(movl %ebx, %es:x(%edi)) ;
17328
17329 #define ROUND(x) \
17330 + nop; nop; nop; \
17331 SRC(movl x(%esi), %ebx ) ; \
17332 adcl %ebx, %eax ; \
17333 - DST(movl %ebx, x(%edi) ) ;
17334 + DST(movl %ebx, %es:x(%edi)) ;
17335
17336 #define ARGBASE 12
17337 -
17338 -ENTRY(csum_partial_copy_generic)
17339 +
17340 +ENTRY(csum_partial_copy_generic_to_user)
17341 CFI_STARTPROC
17342 +
17343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17344 + pushl_cfi %gs
17345 + popl_cfi %es
17346 + jmp csum_partial_copy_generic
17347 +#endif
17348 +
17349 +ENTRY(csum_partial_copy_generic_from_user)
17350 +
17351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17352 + pushl_cfi %gs
17353 + popl_cfi %ds
17354 +#endif
17355 +
17356 +ENTRY(csum_partial_copy_generic)
17357 pushl_cfi %ebx
17358 CFI_REL_OFFSET ebx, 0
17359 pushl_cfi %edi
17360 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17361 subl %ebx, %edi
17362 lea -1(%esi),%edx
17363 andl $-32,%edx
17364 - lea 3f(%ebx,%ebx), %ebx
17365 + lea 3f(%ebx,%ebx,2), %ebx
17366 testl %esi, %esi
17367 jmp *%ebx
17368 1: addl $64,%esi
17369 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17370 jb 5f
17371 SRC( movw (%esi), %dx )
17372 leal 2(%esi), %esi
17373 -DST( movw %dx, (%edi) )
17374 +DST( movw %dx, %es:(%edi) )
17375 leal 2(%edi), %edi
17376 je 6f
17377 shll $16,%edx
17378 5:
17379 SRC( movb (%esi), %dl )
17380 -DST( movb %dl, (%edi) )
17381 +DST( movb %dl, %es:(%edi) )
17382 6: addl %edx, %eax
17383 adcl $0, %eax
17384 7:
17385 .section .fixup, "ax"
17386 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17387 - movl $-EFAULT, (%ebx)
17388 + movl $-EFAULT, %ss:(%ebx)
17389 # zero the complete destination (computing the rest is too much work)
17390 movl ARGBASE+8(%esp),%edi # dst
17391 movl ARGBASE+12(%esp),%ecx # len
17392 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17393 rep; stosb
17394 jmp 7b
17395 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17396 - movl $-EFAULT, (%ebx)
17397 + movl $-EFAULT, %ss:(%ebx)
17398 jmp 7b
17399 .previous
17400
17401 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17402 + pushl_cfi %ss
17403 + popl_cfi %ds
17404 + pushl_cfi %ss
17405 + popl_cfi %es
17406 +#endif
17407 +
17408 popl_cfi %esi
17409 CFI_RESTORE esi
17410 popl_cfi %edi
17411 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17412 CFI_RESTORE ebx
17413 ret
17414 CFI_ENDPROC
17415 -ENDPROC(csum_partial_copy_generic)
17416 +ENDPROC(csum_partial_copy_generic_to_user)
17417
17418 #undef ROUND
17419 #undef ROUND1
17420 diff -urNp linux-3.0.7/arch/x86/lib/clear_page_64.S linux-3.0.7/arch/x86/lib/clear_page_64.S
17421 --- linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17422 +++ linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17423 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17424 movl $4096/8,%ecx
17425 xorl %eax,%eax
17426 rep stosq
17427 + pax_force_retaddr
17428 ret
17429 CFI_ENDPROC
17430 ENDPROC(clear_page_c)
17431 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17432 movl $4096,%ecx
17433 xorl %eax,%eax
17434 rep stosb
17435 + pax_force_retaddr
17436 ret
17437 CFI_ENDPROC
17438 ENDPROC(clear_page_c_e)
17439 @@ -43,6 +45,7 @@ ENTRY(clear_page)
17440 leaq 64(%rdi),%rdi
17441 jnz .Lloop
17442 nop
17443 + pax_force_retaddr
17444 ret
17445 CFI_ENDPROC
17446 .Lclear_page_end:
17447 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
17448
17449 #include <asm/cpufeature.h>
17450
17451 - .section .altinstr_replacement,"ax"
17452 + .section .altinstr_replacement,"a"
17453 1: .byte 0xeb /* jmp <disp8> */
17454 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17455 2: .byte 0xeb /* jmp <disp8> */
17456 diff -urNp linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S
17457 --- linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17458 +++ linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17459 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17460
17461 popf
17462 mov $1, %al
17463 + pax_force_retaddr
17464 ret
17465
17466 not_same:
17467 popf
17468 xor %al,%al
17469 + pax_force_retaddr
17470 ret
17471
17472 CFI_ENDPROC
17473 diff -urNp linux-3.0.7/arch/x86/lib/copy_page_64.S linux-3.0.7/arch/x86/lib/copy_page_64.S
17474 --- linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17475 +++ linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17476 @@ -2,12 +2,14 @@
17477
17478 #include <linux/linkage.h>
17479 #include <asm/dwarf2.h>
17480 +#include <asm/alternative-asm.h>
17481
17482 ALIGN
17483 copy_page_c:
17484 CFI_STARTPROC
17485 movl $4096/8,%ecx
17486 rep movsq
17487 + pax_force_retaddr
17488 ret
17489 CFI_ENDPROC
17490 ENDPROC(copy_page_c)
17491 @@ -94,6 +96,7 @@ ENTRY(copy_page)
17492 CFI_RESTORE r13
17493 addq $3*8,%rsp
17494 CFI_ADJUST_CFA_OFFSET -3*8
17495 + pax_force_retaddr
17496 ret
17497 .Lcopy_page_end:
17498 CFI_ENDPROC
17499 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
17500
17501 #include <asm/cpufeature.h>
17502
17503 - .section .altinstr_replacement,"ax"
17504 + .section .altinstr_replacement,"a"
17505 1: .byte 0xeb /* jmp <disp8> */
17506 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17507 2:
17508 diff -urNp linux-3.0.7/arch/x86/lib/copy_user_64.S linux-3.0.7/arch/x86/lib/copy_user_64.S
17509 --- linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17510 +++ linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17511 @@ -16,6 +16,7 @@
17512 #include <asm/thread_info.h>
17513 #include <asm/cpufeature.h>
17514 #include <asm/alternative-asm.h>
17515 +#include <asm/pgtable.h>
17516
17517 /*
17518 * By placing feature2 after feature1 in altinstructions section, we logically
17519 @@ -29,7 +30,7 @@
17520 .byte 0xe9 /* 32bit jump */
17521 .long \orig-1f /* by default jump to orig */
17522 1:
17523 - .section .altinstr_replacement,"ax"
17524 + .section .altinstr_replacement,"a"
17525 2: .byte 0xe9 /* near jump with 32bit immediate */
17526 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17527 3: .byte 0xe9 /* near jump with 32bit immediate */
17528 @@ -71,47 +72,20 @@
17529 #endif
17530 .endm
17531
17532 -/* Standard copy_to_user with segment limit checking */
17533 -ENTRY(_copy_to_user)
17534 - CFI_STARTPROC
17535 - GET_THREAD_INFO(%rax)
17536 - movq %rdi,%rcx
17537 - addq %rdx,%rcx
17538 - jc bad_to_user
17539 - cmpq TI_addr_limit(%rax),%rcx
17540 - ja bad_to_user
17541 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17542 - copy_user_generic_unrolled,copy_user_generic_string, \
17543 - copy_user_enhanced_fast_string
17544 - CFI_ENDPROC
17545 -ENDPROC(_copy_to_user)
17546 -
17547 -/* Standard copy_from_user with segment limit checking */
17548 -ENTRY(_copy_from_user)
17549 - CFI_STARTPROC
17550 - GET_THREAD_INFO(%rax)
17551 - movq %rsi,%rcx
17552 - addq %rdx,%rcx
17553 - jc bad_from_user
17554 - cmpq TI_addr_limit(%rax),%rcx
17555 - ja bad_from_user
17556 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17557 - copy_user_generic_unrolled,copy_user_generic_string, \
17558 - copy_user_enhanced_fast_string
17559 - CFI_ENDPROC
17560 -ENDPROC(_copy_from_user)
17561 -
17562 .section .fixup,"ax"
17563 /* must zero dest */
17564 ENTRY(bad_from_user)
17565 bad_from_user:
17566 CFI_STARTPROC
17567 + testl %edx,%edx
17568 + js bad_to_user
17569 movl %edx,%ecx
17570 xorl %eax,%eax
17571 rep
17572 stosb
17573 bad_to_user:
17574 movl %edx,%eax
17575 + pax_force_retaddr
17576 ret
17577 CFI_ENDPROC
17578 ENDPROC(bad_from_user)
17579 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17580 decl %ecx
17581 jnz 21b
17582 23: xor %eax,%eax
17583 + pax_force_retaddr
17584 ret
17585
17586 .section .fixup,"ax"
17587 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17588 3: rep
17589 movsb
17590 4: xorl %eax,%eax
17591 + pax_force_retaddr
17592 ret
17593
17594 .section .fixup,"ax"
17595 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17596 1: rep
17597 movsb
17598 2: xorl %eax,%eax
17599 + pax_force_retaddr
17600 ret
17601
17602 .section .fixup,"ax"
17603 diff -urNp linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S
17604 --- linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17605 +++ linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17606 @@ -8,12 +8,14 @@
17607
17608 #include <linux/linkage.h>
17609 #include <asm/dwarf2.h>
17610 +#include <asm/alternative-asm.h>
17611
17612 #define FIX_ALIGNMENT 1
17613
17614 #include <asm/current.h>
17615 #include <asm/asm-offsets.h>
17616 #include <asm/thread_info.h>
17617 +#include <asm/pgtable.h>
17618
17619 .macro ALIGN_DESTINATION
17620 #ifdef FIX_ALIGNMENT
17621 @@ -50,6 +52,15 @@
17622 */
17623 ENTRY(__copy_user_nocache)
17624 CFI_STARTPROC
17625 +
17626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17627 + mov $PAX_USER_SHADOW_BASE,%rcx
17628 + cmp %rcx,%rsi
17629 + jae 1f
17630 + add %rcx,%rsi
17631 +1:
17632 +#endif
17633 +
17634 cmpl $8,%edx
17635 jb 20f /* less then 8 bytes, go to byte copy loop */
17636 ALIGN_DESTINATION
17637 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17638 jnz 21b
17639 23: xorl %eax,%eax
17640 sfence
17641 + pax_force_retaddr
17642 ret
17643
17644 .section .fixup,"ax"
17645 diff -urNp linux-3.0.7/arch/x86/lib/csum-copy_64.S linux-3.0.7/arch/x86/lib/csum-copy_64.S
17646 --- linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17647 +++ linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17648 @@ -8,6 +8,7 @@
17649 #include <linux/linkage.h>
17650 #include <asm/dwarf2.h>
17651 #include <asm/errno.h>
17652 +#include <asm/alternative-asm.h>
17653
17654 /*
17655 * Checksum copy with exception handling.
17656 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
17657 CFI_RESTORE rbp
17658 addq $7*8, %rsp
17659 CFI_ADJUST_CFA_OFFSET -7*8
17660 + pax_force_retaddr
17661 ret
17662 CFI_RESTORE_STATE
17663
17664 diff -urNp linux-3.0.7/arch/x86/lib/csum-wrappers_64.c linux-3.0.7/arch/x86/lib/csum-wrappers_64.c
17665 --- linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17666 +++ linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
17667 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
17668 len -= 2;
17669 }
17670 }
17671 - isum = csum_partial_copy_generic((__force const void *)src,
17672 +
17673 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17674 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17675 + src += PAX_USER_SHADOW_BASE;
17676 +#endif
17677 +
17678 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
17679 dst, len, isum, errp, NULL);
17680 if (unlikely(*errp))
17681 goto out_err;
17682 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
17683 }
17684
17685 *errp = 0;
17686 - return csum_partial_copy_generic(src, (void __force *)dst,
17687 +
17688 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17689 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17690 + dst += PAX_USER_SHADOW_BASE;
17691 +#endif
17692 +
17693 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
17694 len, isum, NULL, errp);
17695 }
17696 EXPORT_SYMBOL(csum_partial_copy_to_user);
17697 diff -urNp linux-3.0.7/arch/x86/lib/getuser.S linux-3.0.7/arch/x86/lib/getuser.S
17698 --- linux-3.0.7/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17699 +++ linux-3.0.7/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
17700 @@ -33,15 +33,38 @@
17701 #include <asm/asm-offsets.h>
17702 #include <asm/thread_info.h>
17703 #include <asm/asm.h>
17704 +#include <asm/segment.h>
17705 +#include <asm/pgtable.h>
17706 +#include <asm/alternative-asm.h>
17707 +
17708 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17709 +#define __copyuser_seg gs;
17710 +#else
17711 +#define __copyuser_seg
17712 +#endif
17713
17714 .text
17715 ENTRY(__get_user_1)
17716 CFI_STARTPROC
17717 +
17718 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17719 GET_THREAD_INFO(%_ASM_DX)
17720 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17721 jae bad_get_user
17722 -1: movzb (%_ASM_AX),%edx
17723 +
17724 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17725 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17726 + cmp %_ASM_DX,%_ASM_AX
17727 + jae 1234f
17728 + add %_ASM_DX,%_ASM_AX
17729 +1234:
17730 +#endif
17731 +
17732 +#endif
17733 +
17734 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17735 xor %eax,%eax
17736 + pax_force_retaddr
17737 ret
17738 CFI_ENDPROC
17739 ENDPROC(__get_user_1)
17740 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
17741 ENTRY(__get_user_2)
17742 CFI_STARTPROC
17743 add $1,%_ASM_AX
17744 +
17745 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17746 jc bad_get_user
17747 GET_THREAD_INFO(%_ASM_DX)
17748 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17749 jae bad_get_user
17750 -2: movzwl -1(%_ASM_AX),%edx
17751 +
17752 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17753 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17754 + cmp %_ASM_DX,%_ASM_AX
17755 + jae 1234f
17756 + add %_ASM_DX,%_ASM_AX
17757 +1234:
17758 +#endif
17759 +
17760 +#endif
17761 +
17762 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17763 xor %eax,%eax
17764 + pax_force_retaddr
17765 ret
17766 CFI_ENDPROC
17767 ENDPROC(__get_user_2)
17768 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
17769 ENTRY(__get_user_4)
17770 CFI_STARTPROC
17771 add $3,%_ASM_AX
17772 +
17773 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17774 jc bad_get_user
17775 GET_THREAD_INFO(%_ASM_DX)
17776 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17777 jae bad_get_user
17778 -3: mov -3(%_ASM_AX),%edx
17779 +
17780 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17781 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17782 + cmp %_ASM_DX,%_ASM_AX
17783 + jae 1234f
17784 + add %_ASM_DX,%_ASM_AX
17785 +1234:
17786 +#endif
17787 +
17788 +#endif
17789 +
17790 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17791 xor %eax,%eax
17792 + pax_force_retaddr
17793 ret
17794 CFI_ENDPROC
17795 ENDPROC(__get_user_4)
17796 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
17797 GET_THREAD_INFO(%_ASM_DX)
17798 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17799 jae bad_get_user
17800 +
17801 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17802 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17803 + cmp %_ASM_DX,%_ASM_AX
17804 + jae 1234f
17805 + add %_ASM_DX,%_ASM_AX
17806 +1234:
17807 +#endif
17808 +
17809 4: movq -7(%_ASM_AX),%_ASM_DX
17810 xor %eax,%eax
17811 + pax_force_retaddr
17812 ret
17813 CFI_ENDPROC
17814 ENDPROC(__get_user_8)
17815 @@ -91,6 +152,7 @@ bad_get_user:
17816 CFI_STARTPROC
17817 xor %edx,%edx
17818 mov $(-EFAULT),%_ASM_AX
17819 + pax_force_retaddr
17820 ret
17821 CFI_ENDPROC
17822 END(bad_get_user)
17823 diff -urNp linux-3.0.7/arch/x86/lib/insn.c linux-3.0.7/arch/x86/lib/insn.c
17824 --- linux-3.0.7/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17825 +++ linux-3.0.7/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17826 @@ -21,6 +21,11 @@
17827 #include <linux/string.h>
17828 #include <asm/inat.h>
17829 #include <asm/insn.h>
17830 +#ifdef __KERNEL__
17831 +#include <asm/pgtable_types.h>
17832 +#else
17833 +#define ktla_ktva(addr) addr
17834 +#endif
17835
17836 #define get_next(t, insn) \
17837 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17838 @@ -40,8 +45,8 @@
17839 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17840 {
17841 memset(insn, 0, sizeof(*insn));
17842 - insn->kaddr = kaddr;
17843 - insn->next_byte = kaddr;
17844 + insn->kaddr = ktla_ktva(kaddr);
17845 + insn->next_byte = ktla_ktva(kaddr);
17846 insn->x86_64 = x86_64 ? 1 : 0;
17847 insn->opnd_bytes = 4;
17848 if (x86_64)
17849 diff -urNp linux-3.0.7/arch/x86/lib/iomap_copy_64.S linux-3.0.7/arch/x86/lib/iomap_copy_64.S
17850 --- linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17851 +++ linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
17852 @@ -17,6 +17,7 @@
17853
17854 #include <linux/linkage.h>
17855 #include <asm/dwarf2.h>
17856 +#include <asm/alternative-asm.h>
17857
17858 /*
17859 * override generic version in lib/iomap_copy.c
17860 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
17861 CFI_STARTPROC
17862 movl %edx,%ecx
17863 rep movsd
17864 + pax_force_retaddr
17865 ret
17866 CFI_ENDPROC
17867 ENDPROC(__iowrite32_copy)
17868 diff -urNp linux-3.0.7/arch/x86/lib/memcpy_64.S linux-3.0.7/arch/x86/lib/memcpy_64.S
17869 --- linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17870 +++ linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
17871 @@ -34,6 +34,7 @@
17872 rep movsq
17873 movl %edx, %ecx
17874 rep movsb
17875 + pax_force_retaddr
17876 ret
17877 .Lmemcpy_e:
17878 .previous
17879 @@ -51,6 +52,7 @@
17880
17881 movl %edx, %ecx
17882 rep movsb
17883 + pax_force_retaddr
17884 ret
17885 .Lmemcpy_e_e:
17886 .previous
17887 @@ -141,6 +143,7 @@ ENTRY(memcpy)
17888 movq %r9, 1*8(%rdi)
17889 movq %r10, -2*8(%rdi, %rdx)
17890 movq %r11, -1*8(%rdi, %rdx)
17891 + pax_force_retaddr
17892 retq
17893 .p2align 4
17894 .Lless_16bytes:
17895 @@ -153,6 +156,7 @@ ENTRY(memcpy)
17896 movq -1*8(%rsi, %rdx), %r9
17897 movq %r8, 0*8(%rdi)
17898 movq %r9, -1*8(%rdi, %rdx)
17899 + pax_force_retaddr
17900 retq
17901 .p2align 4
17902 .Lless_8bytes:
17903 @@ -166,6 +170,7 @@ ENTRY(memcpy)
17904 movl -4(%rsi, %rdx), %r8d
17905 movl %ecx, (%rdi)
17906 movl %r8d, -4(%rdi, %rdx)
17907 + pax_force_retaddr
17908 retq
17909 .p2align 4
17910 .Lless_3bytes:
17911 @@ -183,6 +188,7 @@ ENTRY(memcpy)
17912 jnz .Lloop_1
17913
17914 .Lend:
17915 + pax_force_retaddr
17916 retq
17917 CFI_ENDPROC
17918 ENDPROC(memcpy)
17919 diff -urNp linux-3.0.7/arch/x86/lib/memmove_64.S linux-3.0.7/arch/x86/lib/memmove_64.S
17920 --- linux-3.0.7/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17921 +++ linux-3.0.7/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
17922 @@ -9,6 +9,7 @@
17923 #include <linux/linkage.h>
17924 #include <asm/dwarf2.h>
17925 #include <asm/cpufeature.h>
17926 +#include <asm/alternative-asm.h>
17927
17928 #undef memmove
17929
17930 @@ -201,6 +202,7 @@ ENTRY(memmove)
17931 movb (%rsi), %r11b
17932 movb %r11b, (%rdi)
17933 13:
17934 + pax_force_retaddr
17935 retq
17936 CFI_ENDPROC
17937
17938 @@ -209,6 +211,7 @@ ENTRY(memmove)
17939 /* Forward moving data. */
17940 movq %rdx, %rcx
17941 rep movsb
17942 + pax_force_retaddr
17943 retq
17944 .Lmemmove_end_forward_efs:
17945 .previous
17946 diff -urNp linux-3.0.7/arch/x86/lib/memset_64.S linux-3.0.7/arch/x86/lib/memset_64.S
17947 --- linux-3.0.7/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17948 +++ linux-3.0.7/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
17949 @@ -31,6 +31,7 @@
17950 movl %r8d,%ecx
17951 rep stosb
17952 movq %r9,%rax
17953 + pax_force_retaddr
17954 ret
17955 .Lmemset_e:
17956 .previous
17957 @@ -53,6 +54,7 @@
17958 movl %edx,%ecx
17959 rep stosb
17960 movq %r9,%rax
17961 + pax_force_retaddr
17962 ret
17963 .Lmemset_e_e:
17964 .previous
17965 @@ -121,6 +123,7 @@ ENTRY(__memset)
17966
17967 .Lende:
17968 movq %r10,%rax
17969 + pax_force_retaddr
17970 ret
17971
17972 CFI_RESTORE_STATE
17973 diff -urNp linux-3.0.7/arch/x86/lib/mmx_32.c linux-3.0.7/arch/x86/lib/mmx_32.c
17974 --- linux-3.0.7/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17975 +++ linux-3.0.7/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17976 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17977 {
17978 void *p;
17979 int i;
17980 + unsigned long cr0;
17981
17982 if (unlikely(in_interrupt()))
17983 return __memcpy(to, from, len);
17984 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17985 kernel_fpu_begin();
17986
17987 __asm__ __volatile__ (
17988 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17989 - " prefetch 64(%0)\n"
17990 - " prefetch 128(%0)\n"
17991 - " prefetch 192(%0)\n"
17992 - " prefetch 256(%0)\n"
17993 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17994 + " prefetch 64(%1)\n"
17995 + " prefetch 128(%1)\n"
17996 + " prefetch 192(%1)\n"
17997 + " prefetch 256(%1)\n"
17998 "2: \n"
17999 ".section .fixup, \"ax\"\n"
18000 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18001 + "3: \n"
18002 +
18003 +#ifdef CONFIG_PAX_KERNEXEC
18004 + " movl %%cr0, %0\n"
18005 + " movl %0, %%eax\n"
18006 + " andl $0xFFFEFFFF, %%eax\n"
18007 + " movl %%eax, %%cr0\n"
18008 +#endif
18009 +
18010 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18011 +
18012 +#ifdef CONFIG_PAX_KERNEXEC
18013 + " movl %0, %%cr0\n"
18014 +#endif
18015 +
18016 " jmp 2b\n"
18017 ".previous\n"
18018 _ASM_EXTABLE(1b, 3b)
18019 - : : "r" (from));
18020 + : "=&r" (cr0) : "r" (from) : "ax");
18021
18022 for ( ; i > 5; i--) {
18023 __asm__ __volatile__ (
18024 - "1: prefetch 320(%0)\n"
18025 - "2: movq (%0), %%mm0\n"
18026 - " movq 8(%0), %%mm1\n"
18027 - " movq 16(%0), %%mm2\n"
18028 - " movq 24(%0), %%mm3\n"
18029 - " movq %%mm0, (%1)\n"
18030 - " movq %%mm1, 8(%1)\n"
18031 - " movq %%mm2, 16(%1)\n"
18032 - " movq %%mm3, 24(%1)\n"
18033 - " movq 32(%0), %%mm0\n"
18034 - " movq 40(%0), %%mm1\n"
18035 - " movq 48(%0), %%mm2\n"
18036 - " movq 56(%0), %%mm3\n"
18037 - " movq %%mm0, 32(%1)\n"
18038 - " movq %%mm1, 40(%1)\n"
18039 - " movq %%mm2, 48(%1)\n"
18040 - " movq %%mm3, 56(%1)\n"
18041 + "1: prefetch 320(%1)\n"
18042 + "2: movq (%1), %%mm0\n"
18043 + " movq 8(%1), %%mm1\n"
18044 + " movq 16(%1), %%mm2\n"
18045 + " movq 24(%1), %%mm3\n"
18046 + " movq %%mm0, (%2)\n"
18047 + " movq %%mm1, 8(%2)\n"
18048 + " movq %%mm2, 16(%2)\n"
18049 + " movq %%mm3, 24(%2)\n"
18050 + " movq 32(%1), %%mm0\n"
18051 + " movq 40(%1), %%mm1\n"
18052 + " movq 48(%1), %%mm2\n"
18053 + " movq 56(%1), %%mm3\n"
18054 + " movq %%mm0, 32(%2)\n"
18055 + " movq %%mm1, 40(%2)\n"
18056 + " movq %%mm2, 48(%2)\n"
18057 + " movq %%mm3, 56(%2)\n"
18058 ".section .fixup, \"ax\"\n"
18059 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18060 + "3:\n"
18061 +
18062 +#ifdef CONFIG_PAX_KERNEXEC
18063 + " movl %%cr0, %0\n"
18064 + " movl %0, %%eax\n"
18065 + " andl $0xFFFEFFFF, %%eax\n"
18066 + " movl %%eax, %%cr0\n"
18067 +#endif
18068 +
18069 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18070 +
18071 +#ifdef CONFIG_PAX_KERNEXEC
18072 + " movl %0, %%cr0\n"
18073 +#endif
18074 +
18075 " jmp 2b\n"
18076 ".previous\n"
18077 _ASM_EXTABLE(1b, 3b)
18078 - : : "r" (from), "r" (to) : "memory");
18079 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18080
18081 from += 64;
18082 to += 64;
18083 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18084 static void fast_copy_page(void *to, void *from)
18085 {
18086 int i;
18087 + unsigned long cr0;
18088
18089 kernel_fpu_begin();
18090
18091 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18092 * but that is for later. -AV
18093 */
18094 __asm__ __volatile__(
18095 - "1: prefetch (%0)\n"
18096 - " prefetch 64(%0)\n"
18097 - " prefetch 128(%0)\n"
18098 - " prefetch 192(%0)\n"
18099 - " prefetch 256(%0)\n"
18100 + "1: prefetch (%1)\n"
18101 + " prefetch 64(%1)\n"
18102 + " prefetch 128(%1)\n"
18103 + " prefetch 192(%1)\n"
18104 + " prefetch 256(%1)\n"
18105 "2: \n"
18106 ".section .fixup, \"ax\"\n"
18107 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18108 + "3: \n"
18109 +
18110 +#ifdef CONFIG_PAX_KERNEXEC
18111 + " movl %%cr0, %0\n"
18112 + " movl %0, %%eax\n"
18113 + " andl $0xFFFEFFFF, %%eax\n"
18114 + " movl %%eax, %%cr0\n"
18115 +#endif
18116 +
18117 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18118 +
18119 +#ifdef CONFIG_PAX_KERNEXEC
18120 + " movl %0, %%cr0\n"
18121 +#endif
18122 +
18123 " jmp 2b\n"
18124 ".previous\n"
18125 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18126 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18127
18128 for (i = 0; i < (4096-320)/64; i++) {
18129 __asm__ __volatile__ (
18130 - "1: prefetch 320(%0)\n"
18131 - "2: movq (%0), %%mm0\n"
18132 - " movntq %%mm0, (%1)\n"
18133 - " movq 8(%0), %%mm1\n"
18134 - " movntq %%mm1, 8(%1)\n"
18135 - " movq 16(%0), %%mm2\n"
18136 - " movntq %%mm2, 16(%1)\n"
18137 - " movq 24(%0), %%mm3\n"
18138 - " movntq %%mm3, 24(%1)\n"
18139 - " movq 32(%0), %%mm4\n"
18140 - " movntq %%mm4, 32(%1)\n"
18141 - " movq 40(%0), %%mm5\n"
18142 - " movntq %%mm5, 40(%1)\n"
18143 - " movq 48(%0), %%mm6\n"
18144 - " movntq %%mm6, 48(%1)\n"
18145 - " movq 56(%0), %%mm7\n"
18146 - " movntq %%mm7, 56(%1)\n"
18147 + "1: prefetch 320(%1)\n"
18148 + "2: movq (%1), %%mm0\n"
18149 + " movntq %%mm0, (%2)\n"
18150 + " movq 8(%1), %%mm1\n"
18151 + " movntq %%mm1, 8(%2)\n"
18152 + " movq 16(%1), %%mm2\n"
18153 + " movntq %%mm2, 16(%2)\n"
18154 + " movq 24(%1), %%mm3\n"
18155 + " movntq %%mm3, 24(%2)\n"
18156 + " movq 32(%1), %%mm4\n"
18157 + " movntq %%mm4, 32(%2)\n"
18158 + " movq 40(%1), %%mm5\n"
18159 + " movntq %%mm5, 40(%2)\n"
18160 + " movq 48(%1), %%mm6\n"
18161 + " movntq %%mm6, 48(%2)\n"
18162 + " movq 56(%1), %%mm7\n"
18163 + " movntq %%mm7, 56(%2)\n"
18164 ".section .fixup, \"ax\"\n"
18165 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18166 + "3:\n"
18167 +
18168 +#ifdef CONFIG_PAX_KERNEXEC
18169 + " movl %%cr0, %0\n"
18170 + " movl %0, %%eax\n"
18171 + " andl $0xFFFEFFFF, %%eax\n"
18172 + " movl %%eax, %%cr0\n"
18173 +#endif
18174 +
18175 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18176 +
18177 +#ifdef CONFIG_PAX_KERNEXEC
18178 + " movl %0, %%cr0\n"
18179 +#endif
18180 +
18181 " jmp 2b\n"
18182 ".previous\n"
18183 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18184 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18185
18186 from += 64;
18187 to += 64;
18188 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18189 static void fast_copy_page(void *to, void *from)
18190 {
18191 int i;
18192 + unsigned long cr0;
18193
18194 kernel_fpu_begin();
18195
18196 __asm__ __volatile__ (
18197 - "1: prefetch (%0)\n"
18198 - " prefetch 64(%0)\n"
18199 - " prefetch 128(%0)\n"
18200 - " prefetch 192(%0)\n"
18201 - " prefetch 256(%0)\n"
18202 + "1: prefetch (%1)\n"
18203 + " prefetch 64(%1)\n"
18204 + " prefetch 128(%1)\n"
18205 + " prefetch 192(%1)\n"
18206 + " prefetch 256(%1)\n"
18207 "2: \n"
18208 ".section .fixup, \"ax\"\n"
18209 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18210 + "3: \n"
18211 +
18212 +#ifdef CONFIG_PAX_KERNEXEC
18213 + " movl %%cr0, %0\n"
18214 + " movl %0, %%eax\n"
18215 + " andl $0xFFFEFFFF, %%eax\n"
18216 + " movl %%eax, %%cr0\n"
18217 +#endif
18218 +
18219 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18220 +
18221 +#ifdef CONFIG_PAX_KERNEXEC
18222 + " movl %0, %%cr0\n"
18223 +#endif
18224 +
18225 " jmp 2b\n"
18226 ".previous\n"
18227 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18228 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18229
18230 for (i = 0; i < 4096/64; i++) {
18231 __asm__ __volatile__ (
18232 - "1: prefetch 320(%0)\n"
18233 - "2: movq (%0), %%mm0\n"
18234 - " movq 8(%0), %%mm1\n"
18235 - " movq 16(%0), %%mm2\n"
18236 - " movq 24(%0), %%mm3\n"
18237 - " movq %%mm0, (%1)\n"
18238 - " movq %%mm1, 8(%1)\n"
18239 - " movq %%mm2, 16(%1)\n"
18240 - " movq %%mm3, 24(%1)\n"
18241 - " movq 32(%0), %%mm0\n"
18242 - " movq 40(%0), %%mm1\n"
18243 - " movq 48(%0), %%mm2\n"
18244 - " movq 56(%0), %%mm3\n"
18245 - " movq %%mm0, 32(%1)\n"
18246 - " movq %%mm1, 40(%1)\n"
18247 - " movq %%mm2, 48(%1)\n"
18248 - " movq %%mm3, 56(%1)\n"
18249 + "1: prefetch 320(%1)\n"
18250 + "2: movq (%1), %%mm0\n"
18251 + " movq 8(%1), %%mm1\n"
18252 + " movq 16(%1), %%mm2\n"
18253 + " movq 24(%1), %%mm3\n"
18254 + " movq %%mm0, (%2)\n"
18255 + " movq %%mm1, 8(%2)\n"
18256 + " movq %%mm2, 16(%2)\n"
18257 + " movq %%mm3, 24(%2)\n"
18258 + " movq 32(%1), %%mm0\n"
18259 + " movq 40(%1), %%mm1\n"
18260 + " movq 48(%1), %%mm2\n"
18261 + " movq 56(%1), %%mm3\n"
18262 + " movq %%mm0, 32(%2)\n"
18263 + " movq %%mm1, 40(%2)\n"
18264 + " movq %%mm2, 48(%2)\n"
18265 + " movq %%mm3, 56(%2)\n"
18266 ".section .fixup, \"ax\"\n"
18267 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18268 + "3:\n"
18269 +
18270 +#ifdef CONFIG_PAX_KERNEXEC
18271 + " movl %%cr0, %0\n"
18272 + " movl %0, %%eax\n"
18273 + " andl $0xFFFEFFFF, %%eax\n"
18274 + " movl %%eax, %%cr0\n"
18275 +#endif
18276 +
18277 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18278 +
18279 +#ifdef CONFIG_PAX_KERNEXEC
18280 + " movl %0, %%cr0\n"
18281 +#endif
18282 +
18283 " jmp 2b\n"
18284 ".previous\n"
18285 _ASM_EXTABLE(1b, 3b)
18286 - : : "r" (from), "r" (to) : "memory");
18287 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18288
18289 from += 64;
18290 to += 64;
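The CONFIG_PAX_KERNEXEC blocks added around the fixup sections above exist because these fixups patch their own code: they overwrite the faulting prefetch at label 1b with a short jmp (0x1AEB or 0x05EB), and under KERNEXEC kernel text is read-only. The andl $0xFFFEFFFF clears CR0.WP (bit 16) so the write is permitted, and the saved value is written back afterwards. A minimal sketch of the same sequence on i386 (the helper name is illustrative; the patch performs this inline in the asm blocks):

static inline void kernexec_patch_insn(void *insn, unsigned short jmp_insn)
{
	unsigned long cr0;

	asm volatile("movl %%cr0,%0" : "=r" (cr0));                              /* save CR0 */
	asm volatile("movl %0,%%cr0" : : "r" (cr0 & 0xFFFEFFFFUL) : "memory");   /* clear CR0.WP (bit 16) */
	*(volatile unsigned short *)insn = jmp_insn;                             /* e.g. 0x1AEB: the "jmp on 26 bytes" */
	asm volatile("movl %0,%%cr0" : : "r" (cr0) : "memory");                  /* restore write protection */
}

The cr0 output operand is also why the asm operand numbers shift from %0/%1 to %1/%2 throughout these hunks, and why "ax" joins the clobber lists.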
18291 diff -urNp linux-3.0.7/arch/x86/lib/msr-reg.S linux-3.0.7/arch/x86/lib/msr-reg.S
18292 --- linux-3.0.7/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18293 +++ linux-3.0.7/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18294 @@ -3,6 +3,7 @@
18295 #include <asm/dwarf2.h>
18296 #include <asm/asm.h>
18297 #include <asm/msr.h>
18298 +#include <asm/alternative-asm.h>
18299
18300 #ifdef CONFIG_X86_64
18301 /*
18302 @@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18303 movl %edi, 28(%r10)
18304 popq_cfi %rbp
18305 popq_cfi %rbx
18306 + pax_force_retaddr
18307 ret
18308 3:
18309 CFI_RESTORE_STATE
18310 diff -urNp linux-3.0.7/arch/x86/lib/putuser.S linux-3.0.7/arch/x86/lib/putuser.S
18311 --- linux-3.0.7/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18312 +++ linux-3.0.7/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18313 @@ -15,7 +15,9 @@
18314 #include <asm/thread_info.h>
18315 #include <asm/errno.h>
18316 #include <asm/asm.h>
18317 -
18318 +#include <asm/segment.h>
18319 +#include <asm/pgtable.h>
18320 +#include <asm/alternative-asm.h>
18321
18322 /*
18323 * __put_user_X
18324 @@ -29,52 +31,119 @@
18325 * as they get called from within inline assembly.
18326 */
18327
18328 -#define ENTER CFI_STARTPROC ; \
18329 - GET_THREAD_INFO(%_ASM_BX)
18330 -#define EXIT ret ; \
18331 +#define ENTER CFI_STARTPROC
18332 +#define EXIT pax_force_retaddr; ret ; \
18333 CFI_ENDPROC
18334
18335 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18336 +#define _DEST %_ASM_CX,%_ASM_BX
18337 +#else
18338 +#define _DEST %_ASM_CX
18339 +#endif
18340 +
18341 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18342 +#define __copyuser_seg gs;
18343 +#else
18344 +#define __copyuser_seg
18345 +#endif
18346 +
18347 .text
18348 ENTRY(__put_user_1)
18349 ENTER
18350 +
18351 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18352 + GET_THREAD_INFO(%_ASM_BX)
18353 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18354 jae bad_put_user
18355 -1: movb %al,(%_ASM_CX)
18356 +
18357 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18358 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18359 + cmp %_ASM_BX,%_ASM_CX
18360 + jb 1234f
18361 + xor %ebx,%ebx
18362 +1234:
18363 +#endif
18364 +
18365 +#endif
18366 +
18367 +1: __copyuser_seg movb %al,(_DEST)
18368 xor %eax,%eax
18369 EXIT
18370 ENDPROC(__put_user_1)
18371
18372 ENTRY(__put_user_2)
18373 ENTER
18374 +
18375 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18376 + GET_THREAD_INFO(%_ASM_BX)
18377 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18378 sub $1,%_ASM_BX
18379 cmp %_ASM_BX,%_ASM_CX
18380 jae bad_put_user
18381 -2: movw %ax,(%_ASM_CX)
18382 +
18383 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18384 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18385 + cmp %_ASM_BX,%_ASM_CX
18386 + jb 1234f
18387 + xor %ebx,%ebx
18388 +1234:
18389 +#endif
18390 +
18391 +#endif
18392 +
18393 +2: __copyuser_seg movw %ax,(_DEST)
18394 xor %eax,%eax
18395 EXIT
18396 ENDPROC(__put_user_2)
18397
18398 ENTRY(__put_user_4)
18399 ENTER
18400 +
18401 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18402 + GET_THREAD_INFO(%_ASM_BX)
18403 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18404 sub $3,%_ASM_BX
18405 cmp %_ASM_BX,%_ASM_CX
18406 jae bad_put_user
18407 -3: movl %eax,(%_ASM_CX)
18408 +
18409 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18410 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18411 + cmp %_ASM_BX,%_ASM_CX
18412 + jb 1234f
18413 + xor %ebx,%ebx
18414 +1234:
18415 +#endif
18416 +
18417 +#endif
18418 +
18419 +3: __copyuser_seg movl %eax,(_DEST)
18420 xor %eax,%eax
18421 EXIT
18422 ENDPROC(__put_user_4)
18423
18424 ENTRY(__put_user_8)
18425 ENTER
18426 +
18427 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18428 + GET_THREAD_INFO(%_ASM_BX)
18429 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18430 sub $7,%_ASM_BX
18431 cmp %_ASM_BX,%_ASM_CX
18432 jae bad_put_user
18433 -4: mov %_ASM_AX,(%_ASM_CX)
18434 +
18435 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18436 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18437 + cmp %_ASM_BX,%_ASM_CX
18438 + jb 1234f
18439 + xor %ebx,%ebx
18440 +1234:
18441 +#endif
18442 +
18443 +#endif
18444 +
18445 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
18446 #ifdef CONFIG_X86_32
18447 -5: movl %edx,4(%_ASM_CX)
18448 +5: __copyuser_seg movl %edx,4(_DEST)
18449 #endif
18450 xor %eax,%eax
18451 EXIT
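The UDEREF changes above replace the flat user store in __put_user_* with either a %gs-prefixed access (i386, via __copyuser_seg) or, on amd64, a base+index access through _DEST = %_ASM_CX,%_ASM_BX, where %rbx holds PAX_USER_SHADOW_BASE for raw user pointers and is zeroed when the pointer already lies inside the shadow area. The same redirect shows up in C form in the usercopy_64.c hunks further below; a minimal sketch, assuming PAX_USER_SHADOW_BASE as defined elsewhere in this patch (the helper name is illustrative):

static inline void __user *uderef_shadow(void __user *uaddr)
{
	unsigned long a = (unsigned long)uaddr;

	if (a < PAX_USER_SHADOW_BASE)		/* raw userland pointer */
		a += PAX_USER_SHADOW_BASE;	/* access it through the shadow mapping */
	return (void __user *)a;		/* already shadowed: use as-is */
}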
18452 diff -urNp linux-3.0.7/arch/x86/lib/rwlock_64.S linux-3.0.7/arch/x86/lib/rwlock_64.S
18453 --- linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18454 +++ linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18455 @@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18456 LOCK_PREFIX
18457 subl $RW_LOCK_BIAS,(%rdi)
18458 jnz __write_lock_failed
18459 + pax_force_retaddr
18460 ret
18461 CFI_ENDPROC
18462 END(__write_lock_failed)
18463 @@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18464 LOCK_PREFIX
18465 decl (%rdi)
18466 js __read_lock_failed
18467 + pax_force_retaddr
18468 ret
18469 CFI_ENDPROC
18470 END(__read_lock_failed)
18471 diff -urNp linux-3.0.7/arch/x86/lib/rwsem_64.S linux-3.0.7/arch/x86/lib/rwsem_64.S
18472 --- linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18473 +++ linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
18474 @@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18475 popq_cfi %rdx
18476 CFI_RESTORE rdx
18477 restore_common_regs
18478 + pax_force_retaddr
18479 ret
18480 CFI_ENDPROC
18481 ENDPROC(call_rwsem_down_read_failed)
18482 @@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18483 movq %rax,%rdi
18484 call rwsem_down_write_failed
18485 restore_common_regs
18486 + pax_force_retaddr
18487 ret
18488 CFI_ENDPROC
18489 ENDPROC(call_rwsem_down_write_failed)
18490 @@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
18491 movq %rax,%rdi
18492 call rwsem_wake
18493 restore_common_regs
18494 -1: ret
18495 +1: pax_force_retaddr
18496 + ret
18497 CFI_ENDPROC
18498 ENDPROC(call_rwsem_wake)
18499
18500 @@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18501 popq_cfi %rdx
18502 CFI_RESTORE rdx
18503 restore_common_regs
18504 + pax_force_retaddr
18505 ret
18506 CFI_ENDPROC
18507 ENDPROC(call_rwsem_downgrade_wake)
18508 diff -urNp linux-3.0.7/arch/x86/lib/thunk_64.S linux-3.0.7/arch/x86/lib/thunk_64.S
18509 --- linux-3.0.7/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18510 +++ linux-3.0.7/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18511 @@ -10,7 +10,8 @@
18512 #include <asm/dwarf2.h>
18513 #include <asm/calling.h>
18514 #include <asm/rwlock.h>
18515 -
18516 + #include <asm/alternative-asm.h>
18517 +
18518 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18519 .macro thunk name,func
18520 .globl \name
18521 @@ -50,5 +51,6 @@
18522 SAVE_ARGS
18523 restore:
18524 RESTORE_ARGS
18525 - ret
18526 + pax_force_retaddr
18527 + ret
18528 CFI_ENDPROC
18529 diff -urNp linux-3.0.7/arch/x86/lib/usercopy_32.c linux-3.0.7/arch/x86/lib/usercopy_32.c
18530 --- linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18531 +++ linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18532 @@ -43,7 +43,7 @@ do { \
18533 __asm__ __volatile__( \
18534 " testl %1,%1\n" \
18535 " jz 2f\n" \
18536 - "0: lodsb\n" \
18537 + "0: "__copyuser_seg"lodsb\n" \
18538 " stosb\n" \
18539 " testb %%al,%%al\n" \
18540 " jz 1f\n" \
18541 @@ -128,10 +128,12 @@ do { \
18542 int __d0; \
18543 might_fault(); \
18544 __asm__ __volatile__( \
18545 + __COPYUSER_SET_ES \
18546 "0: rep; stosl\n" \
18547 " movl %2,%0\n" \
18548 "1: rep; stosb\n" \
18549 "2:\n" \
18550 + __COPYUSER_RESTORE_ES \
18551 ".section .fixup,\"ax\"\n" \
18552 "3: lea 0(%2,%0,4),%0\n" \
18553 " jmp 2b\n" \
18554 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18555 might_fault();
18556
18557 __asm__ __volatile__(
18558 + __COPYUSER_SET_ES
18559 " testl %0, %0\n"
18560 " jz 3f\n"
18561 " andl %0,%%ecx\n"
18562 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18563 " subl %%ecx,%0\n"
18564 " addl %0,%%eax\n"
18565 "1:\n"
18566 + __COPYUSER_RESTORE_ES
18567 ".section .fixup,\"ax\"\n"
18568 "2: xorl %%eax,%%eax\n"
18569 " jmp 1b\n"
18570 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18571
18572 #ifdef CONFIG_X86_INTEL_USERCOPY
18573 static unsigned long
18574 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
18575 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18576 {
18577 int d0, d1;
18578 __asm__ __volatile__(
18579 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18580 " .align 2,0x90\n"
18581 "3: movl 0(%4), %%eax\n"
18582 "4: movl 4(%4), %%edx\n"
18583 - "5: movl %%eax, 0(%3)\n"
18584 - "6: movl %%edx, 4(%3)\n"
18585 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18586 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18587 "7: movl 8(%4), %%eax\n"
18588 "8: movl 12(%4),%%edx\n"
18589 - "9: movl %%eax, 8(%3)\n"
18590 - "10: movl %%edx, 12(%3)\n"
18591 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18592 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18593 "11: movl 16(%4), %%eax\n"
18594 "12: movl 20(%4), %%edx\n"
18595 - "13: movl %%eax, 16(%3)\n"
18596 - "14: movl %%edx, 20(%3)\n"
18597 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18598 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18599 "15: movl 24(%4), %%eax\n"
18600 "16: movl 28(%4), %%edx\n"
18601 - "17: movl %%eax, 24(%3)\n"
18602 - "18: movl %%edx, 28(%3)\n"
18603 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18604 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18605 "19: movl 32(%4), %%eax\n"
18606 "20: movl 36(%4), %%edx\n"
18607 - "21: movl %%eax, 32(%3)\n"
18608 - "22: movl %%edx, 36(%3)\n"
18609 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18610 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18611 "23: movl 40(%4), %%eax\n"
18612 "24: movl 44(%4), %%edx\n"
18613 - "25: movl %%eax, 40(%3)\n"
18614 - "26: movl %%edx, 44(%3)\n"
18615 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18616 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18617 "27: movl 48(%4), %%eax\n"
18618 "28: movl 52(%4), %%edx\n"
18619 - "29: movl %%eax, 48(%3)\n"
18620 - "30: movl %%edx, 52(%3)\n"
18621 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18622 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18623 "31: movl 56(%4), %%eax\n"
18624 "32: movl 60(%4), %%edx\n"
18625 - "33: movl %%eax, 56(%3)\n"
18626 - "34: movl %%edx, 60(%3)\n"
18627 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18628 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18629 " addl $-64, %0\n"
18630 " addl $64, %4\n"
18631 " addl $64, %3\n"
18632 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18633 " shrl $2, %0\n"
18634 " andl $3, %%eax\n"
18635 " cld\n"
18636 + __COPYUSER_SET_ES
18637 "99: rep; movsl\n"
18638 "36: movl %%eax, %0\n"
18639 "37: rep; movsb\n"
18640 "100:\n"
18641 + __COPYUSER_RESTORE_ES
18642 + ".section .fixup,\"ax\"\n"
18643 + "101: lea 0(%%eax,%0,4),%0\n"
18644 + " jmp 100b\n"
18645 + ".previous\n"
18646 + ".section __ex_table,\"a\"\n"
18647 + " .align 4\n"
18648 + " .long 1b,100b\n"
18649 + " .long 2b,100b\n"
18650 + " .long 3b,100b\n"
18651 + " .long 4b,100b\n"
18652 + " .long 5b,100b\n"
18653 + " .long 6b,100b\n"
18654 + " .long 7b,100b\n"
18655 + " .long 8b,100b\n"
18656 + " .long 9b,100b\n"
18657 + " .long 10b,100b\n"
18658 + " .long 11b,100b\n"
18659 + " .long 12b,100b\n"
18660 + " .long 13b,100b\n"
18661 + " .long 14b,100b\n"
18662 + " .long 15b,100b\n"
18663 + " .long 16b,100b\n"
18664 + " .long 17b,100b\n"
18665 + " .long 18b,100b\n"
18666 + " .long 19b,100b\n"
18667 + " .long 20b,100b\n"
18668 + " .long 21b,100b\n"
18669 + " .long 22b,100b\n"
18670 + " .long 23b,100b\n"
18671 + " .long 24b,100b\n"
18672 + " .long 25b,100b\n"
18673 + " .long 26b,100b\n"
18674 + " .long 27b,100b\n"
18675 + " .long 28b,100b\n"
18676 + " .long 29b,100b\n"
18677 + " .long 30b,100b\n"
18678 + " .long 31b,100b\n"
18679 + " .long 32b,100b\n"
18680 + " .long 33b,100b\n"
18681 + " .long 34b,100b\n"
18682 + " .long 35b,100b\n"
18683 + " .long 36b,100b\n"
18684 + " .long 37b,100b\n"
18685 + " .long 99b,101b\n"
18686 + ".previous"
18687 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
18688 + : "1"(to), "2"(from), "0"(size)
18689 + : "eax", "edx", "memory");
18690 + return size;
18691 +}
18692 +
18693 +static unsigned long
18694 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18695 +{
18696 + int d0, d1;
18697 + __asm__ __volatile__(
18698 + " .align 2,0x90\n"
18699 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18700 + " cmpl $67, %0\n"
18701 + " jbe 3f\n"
18702 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18703 + " .align 2,0x90\n"
18704 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18705 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18706 + "5: movl %%eax, 0(%3)\n"
18707 + "6: movl %%edx, 4(%3)\n"
18708 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18709 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18710 + "9: movl %%eax, 8(%3)\n"
18711 + "10: movl %%edx, 12(%3)\n"
18712 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18713 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18714 + "13: movl %%eax, 16(%3)\n"
18715 + "14: movl %%edx, 20(%3)\n"
18716 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18717 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18718 + "17: movl %%eax, 24(%3)\n"
18719 + "18: movl %%edx, 28(%3)\n"
18720 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18721 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18722 + "21: movl %%eax, 32(%3)\n"
18723 + "22: movl %%edx, 36(%3)\n"
18724 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18725 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18726 + "25: movl %%eax, 40(%3)\n"
18727 + "26: movl %%edx, 44(%3)\n"
18728 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18729 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18730 + "29: movl %%eax, 48(%3)\n"
18731 + "30: movl %%edx, 52(%3)\n"
18732 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18733 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18734 + "33: movl %%eax, 56(%3)\n"
18735 + "34: movl %%edx, 60(%3)\n"
18736 + " addl $-64, %0\n"
18737 + " addl $64, %4\n"
18738 + " addl $64, %3\n"
18739 + " cmpl $63, %0\n"
18740 + " ja 1b\n"
18741 + "35: movl %0, %%eax\n"
18742 + " shrl $2, %0\n"
18743 + " andl $3, %%eax\n"
18744 + " cld\n"
18745 + "99: rep; "__copyuser_seg" movsl\n"
18746 + "36: movl %%eax, %0\n"
18747 + "37: rep; "__copyuser_seg" movsb\n"
18748 + "100:\n"
18749 ".section .fixup,\"ax\"\n"
18750 "101: lea 0(%%eax,%0,4),%0\n"
18751 " jmp 100b\n"
18752 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18753 int d0, d1;
18754 __asm__ __volatile__(
18755 " .align 2,0x90\n"
18756 - "0: movl 32(%4), %%eax\n"
18757 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18758 " cmpl $67, %0\n"
18759 " jbe 2f\n"
18760 - "1: movl 64(%4), %%eax\n"
18761 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18762 " .align 2,0x90\n"
18763 - "2: movl 0(%4), %%eax\n"
18764 - "21: movl 4(%4), %%edx\n"
18765 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18766 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18767 " movl %%eax, 0(%3)\n"
18768 " movl %%edx, 4(%3)\n"
18769 - "3: movl 8(%4), %%eax\n"
18770 - "31: movl 12(%4),%%edx\n"
18771 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18772 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18773 " movl %%eax, 8(%3)\n"
18774 " movl %%edx, 12(%3)\n"
18775 - "4: movl 16(%4), %%eax\n"
18776 - "41: movl 20(%4), %%edx\n"
18777 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18778 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18779 " movl %%eax, 16(%3)\n"
18780 " movl %%edx, 20(%3)\n"
18781 - "10: movl 24(%4), %%eax\n"
18782 - "51: movl 28(%4), %%edx\n"
18783 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18784 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18785 " movl %%eax, 24(%3)\n"
18786 " movl %%edx, 28(%3)\n"
18787 - "11: movl 32(%4), %%eax\n"
18788 - "61: movl 36(%4), %%edx\n"
18789 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18790 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18791 " movl %%eax, 32(%3)\n"
18792 " movl %%edx, 36(%3)\n"
18793 - "12: movl 40(%4), %%eax\n"
18794 - "71: movl 44(%4), %%edx\n"
18795 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18796 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18797 " movl %%eax, 40(%3)\n"
18798 " movl %%edx, 44(%3)\n"
18799 - "13: movl 48(%4), %%eax\n"
18800 - "81: movl 52(%4), %%edx\n"
18801 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18802 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18803 " movl %%eax, 48(%3)\n"
18804 " movl %%edx, 52(%3)\n"
18805 - "14: movl 56(%4), %%eax\n"
18806 - "91: movl 60(%4), %%edx\n"
18807 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18808 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18809 " movl %%eax, 56(%3)\n"
18810 " movl %%edx, 60(%3)\n"
18811 " addl $-64, %0\n"
18812 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18813 " shrl $2, %0\n"
18814 " andl $3, %%eax\n"
18815 " cld\n"
18816 - "6: rep; movsl\n"
18817 + "6: rep; "__copyuser_seg" movsl\n"
18818 " movl %%eax,%0\n"
18819 - "7: rep; movsb\n"
18820 + "7: rep; "__copyuser_seg" movsb\n"
18821 "8:\n"
18822 ".section .fixup,\"ax\"\n"
18823 "9: lea 0(%%eax,%0,4),%0\n"
18824 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18825
18826 __asm__ __volatile__(
18827 " .align 2,0x90\n"
18828 - "0: movl 32(%4), %%eax\n"
18829 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18830 " cmpl $67, %0\n"
18831 " jbe 2f\n"
18832 - "1: movl 64(%4), %%eax\n"
18833 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18834 " .align 2,0x90\n"
18835 - "2: movl 0(%4), %%eax\n"
18836 - "21: movl 4(%4), %%edx\n"
18837 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18838 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18839 " movnti %%eax, 0(%3)\n"
18840 " movnti %%edx, 4(%3)\n"
18841 - "3: movl 8(%4), %%eax\n"
18842 - "31: movl 12(%4),%%edx\n"
18843 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18844 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18845 " movnti %%eax, 8(%3)\n"
18846 " movnti %%edx, 12(%3)\n"
18847 - "4: movl 16(%4), %%eax\n"
18848 - "41: movl 20(%4), %%edx\n"
18849 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18850 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18851 " movnti %%eax, 16(%3)\n"
18852 " movnti %%edx, 20(%3)\n"
18853 - "10: movl 24(%4), %%eax\n"
18854 - "51: movl 28(%4), %%edx\n"
18855 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18856 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18857 " movnti %%eax, 24(%3)\n"
18858 " movnti %%edx, 28(%3)\n"
18859 - "11: movl 32(%4), %%eax\n"
18860 - "61: movl 36(%4), %%edx\n"
18861 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18862 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18863 " movnti %%eax, 32(%3)\n"
18864 " movnti %%edx, 36(%3)\n"
18865 - "12: movl 40(%4), %%eax\n"
18866 - "71: movl 44(%4), %%edx\n"
18867 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18868 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18869 " movnti %%eax, 40(%3)\n"
18870 " movnti %%edx, 44(%3)\n"
18871 - "13: movl 48(%4), %%eax\n"
18872 - "81: movl 52(%4), %%edx\n"
18873 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18874 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18875 " movnti %%eax, 48(%3)\n"
18876 " movnti %%edx, 52(%3)\n"
18877 - "14: movl 56(%4), %%eax\n"
18878 - "91: movl 60(%4), %%edx\n"
18879 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18880 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18881 " movnti %%eax, 56(%3)\n"
18882 " movnti %%edx, 60(%3)\n"
18883 " addl $-64, %0\n"
18884 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18885 " shrl $2, %0\n"
18886 " andl $3, %%eax\n"
18887 " cld\n"
18888 - "6: rep; movsl\n"
18889 + "6: rep; "__copyuser_seg" movsl\n"
18890 " movl %%eax,%0\n"
18891 - "7: rep; movsb\n"
18892 + "7: rep; "__copyuser_seg" movsb\n"
18893 "8:\n"
18894 ".section .fixup,\"ax\"\n"
18895 "9: lea 0(%%eax,%0,4),%0\n"
18896 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18897
18898 __asm__ __volatile__(
18899 " .align 2,0x90\n"
18900 - "0: movl 32(%4), %%eax\n"
18901 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18902 " cmpl $67, %0\n"
18903 " jbe 2f\n"
18904 - "1: movl 64(%4), %%eax\n"
18905 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18906 " .align 2,0x90\n"
18907 - "2: movl 0(%4), %%eax\n"
18908 - "21: movl 4(%4), %%edx\n"
18909 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18910 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18911 " movnti %%eax, 0(%3)\n"
18912 " movnti %%edx, 4(%3)\n"
18913 - "3: movl 8(%4), %%eax\n"
18914 - "31: movl 12(%4),%%edx\n"
18915 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18916 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18917 " movnti %%eax, 8(%3)\n"
18918 " movnti %%edx, 12(%3)\n"
18919 - "4: movl 16(%4), %%eax\n"
18920 - "41: movl 20(%4), %%edx\n"
18921 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18922 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18923 " movnti %%eax, 16(%3)\n"
18924 " movnti %%edx, 20(%3)\n"
18925 - "10: movl 24(%4), %%eax\n"
18926 - "51: movl 28(%4), %%edx\n"
18927 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18928 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18929 " movnti %%eax, 24(%3)\n"
18930 " movnti %%edx, 28(%3)\n"
18931 - "11: movl 32(%4), %%eax\n"
18932 - "61: movl 36(%4), %%edx\n"
18933 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18934 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18935 " movnti %%eax, 32(%3)\n"
18936 " movnti %%edx, 36(%3)\n"
18937 - "12: movl 40(%4), %%eax\n"
18938 - "71: movl 44(%4), %%edx\n"
18939 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18940 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18941 " movnti %%eax, 40(%3)\n"
18942 " movnti %%edx, 44(%3)\n"
18943 - "13: movl 48(%4), %%eax\n"
18944 - "81: movl 52(%4), %%edx\n"
18945 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18946 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18947 " movnti %%eax, 48(%3)\n"
18948 " movnti %%edx, 52(%3)\n"
18949 - "14: movl 56(%4), %%eax\n"
18950 - "91: movl 60(%4), %%edx\n"
18951 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18952 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18953 " movnti %%eax, 56(%3)\n"
18954 " movnti %%edx, 60(%3)\n"
18955 " addl $-64, %0\n"
18956 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18957 " shrl $2, %0\n"
18958 " andl $3, %%eax\n"
18959 " cld\n"
18960 - "6: rep; movsl\n"
18961 + "6: rep; "__copyuser_seg" movsl\n"
18962 " movl %%eax,%0\n"
18963 - "7: rep; movsb\n"
18964 + "7: rep; "__copyuser_seg" movsb\n"
18965 "8:\n"
18966 ".section .fixup,\"ax\"\n"
18967 "9: lea 0(%%eax,%0,4),%0\n"
18968 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18969 */
18970 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18971 unsigned long size);
18972 -unsigned long __copy_user_intel(void __user *to, const void *from,
18973 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18974 + unsigned long size);
18975 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18976 unsigned long size);
18977 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18978 const void __user *from, unsigned long size);
18979 #endif /* CONFIG_X86_INTEL_USERCOPY */
18980
18981 /* Generic arbitrary sized copy. */
18982 -#define __copy_user(to, from, size) \
18983 +#define __copy_user(to, from, size, prefix, set, restore) \
18984 do { \
18985 int __d0, __d1, __d2; \
18986 __asm__ __volatile__( \
18987 + set \
18988 " cmp $7,%0\n" \
18989 " jbe 1f\n" \
18990 " movl %1,%0\n" \
18991 " negl %0\n" \
18992 " andl $7,%0\n" \
18993 " subl %0,%3\n" \
18994 - "4: rep; movsb\n" \
18995 + "4: rep; "prefix"movsb\n" \
18996 " movl %3,%0\n" \
18997 " shrl $2,%0\n" \
18998 " andl $3,%3\n" \
18999 " .align 2,0x90\n" \
19000 - "0: rep; movsl\n" \
19001 + "0: rep; "prefix"movsl\n" \
19002 " movl %3,%0\n" \
19003 - "1: rep; movsb\n" \
19004 + "1: rep; "prefix"movsb\n" \
19005 "2:\n" \
19006 + restore \
19007 ".section .fixup,\"ax\"\n" \
19008 "5: addl %3,%0\n" \
19009 " jmp 2b\n" \
19010 @@ -682,14 +799,14 @@ do { \
19011 " negl %0\n" \
19012 " andl $7,%0\n" \
19013 " subl %0,%3\n" \
19014 - "4: rep; movsb\n" \
19015 + "4: rep; "__copyuser_seg"movsb\n" \
19016 " movl %3,%0\n" \
19017 " shrl $2,%0\n" \
19018 " andl $3,%3\n" \
19019 " .align 2,0x90\n" \
19020 - "0: rep; movsl\n" \
19021 + "0: rep; "__copyuser_seg"movsl\n" \
19022 " movl %3,%0\n" \
19023 - "1: rep; movsb\n" \
19024 + "1: rep; "__copyuser_seg"movsb\n" \
19025 "2:\n" \
19026 ".section .fixup,\"ax\"\n" \
19027 "5: addl %3,%0\n" \
19028 @@ -775,9 +892,9 @@ survive:
19029 }
19030 #endif
19031 if (movsl_is_ok(to, from, n))
19032 - __copy_user(to, from, n);
19033 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19034 else
19035 - n = __copy_user_intel(to, from, n);
19036 + n = __generic_copy_to_user_intel(to, from, n);
19037 return n;
19038 }
19039 EXPORT_SYMBOL(__copy_to_user_ll);
19040 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19041 unsigned long n)
19042 {
19043 if (movsl_is_ok(to, from, n))
19044 - __copy_user(to, from, n);
19045 + __copy_user(to, from, n, __copyuser_seg, "", "");
19046 else
19047 - n = __copy_user_intel((void __user *)to,
19048 - (const void *)from, n);
19049 + n = __generic_copy_from_user_intel(to, from, n);
19050 return n;
19051 }
19052 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19053 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19054 if (n > 64 && cpu_has_xmm2)
19055 n = __copy_user_intel_nocache(to, from, n);
19056 else
19057 - __copy_user(to, from, n);
19058 + __copy_user(to, from, n, __copyuser_seg, "", "");
19059 #else
19060 - __copy_user(to, from, n);
19061 + __copy_user(to, from, n, __copyuser_seg, "", "");
19062 #endif
19063 return n;
19064 }
19065 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19066
19067 -/**
19068 - * copy_to_user: - Copy a block of data into user space.
19069 - * @to: Destination address, in user space.
19070 - * @from: Source address, in kernel space.
19071 - * @n: Number of bytes to copy.
19072 - *
19073 - * Context: User context only. This function may sleep.
19074 - *
19075 - * Copy data from kernel space to user space.
19076 - *
19077 - * Returns number of bytes that could not be copied.
19078 - * On success, this will be zero.
19079 - */
19080 -unsigned long
19081 -copy_to_user(void __user *to, const void *from, unsigned long n)
19082 +void copy_from_user_overflow(void)
19083 {
19084 - if (access_ok(VERIFY_WRITE, to, n))
19085 - n = __copy_to_user(to, from, n);
19086 - return n;
19087 + WARN(1, "Buffer overflow detected!\n");
19088 }
19089 -EXPORT_SYMBOL(copy_to_user);
19090 +EXPORT_SYMBOL(copy_from_user_overflow);
19091
19092 -/**
19093 - * copy_from_user: - Copy a block of data from user space.
19094 - * @to: Destination address, in kernel space.
19095 - * @from: Source address, in user space.
19096 - * @n: Number of bytes to copy.
19097 - *
19098 - * Context: User context only. This function may sleep.
19099 - *
19100 - * Copy data from user space to kernel space.
19101 - *
19102 - * Returns number of bytes that could not be copied.
19103 - * On success, this will be zero.
19104 - *
19105 - * If some data could not be copied, this function will pad the copied
19106 - * data to the requested size using zero bytes.
19107 - */
19108 -unsigned long
19109 -_copy_from_user(void *to, const void __user *from, unsigned long n)
19110 +void copy_to_user_overflow(void)
19111 {
19112 - if (access_ok(VERIFY_READ, from, n))
19113 - n = __copy_from_user(to, from, n);
19114 - else
19115 - memset(to, 0, n);
19116 - return n;
19117 + WARN(1, "Buffer overflow detected!\n");
19118 }
19119 -EXPORT_SYMBOL(_copy_from_user);
19120 +EXPORT_SYMBOL(copy_to_user_overflow);
19121
19122 -void copy_from_user_overflow(void)
19123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19124 +void __set_fs(mm_segment_t x)
19125 {
19126 - WARN(1, "Buffer overflow detected!\n");
19127 + switch (x.seg) {
19128 + case 0:
19129 + loadsegment(gs, 0);
19130 + break;
19131 + case TASK_SIZE_MAX:
19132 + loadsegment(gs, __USER_DS);
19133 + break;
19134 + case -1UL:
19135 + loadsegment(gs, __KERNEL_DS);
19136 + break;
19137 + default:
19138 + BUG();
19139 + }
19140 + return;
19141 }
19142 -EXPORT_SYMBOL(copy_from_user_overflow);
19143 +EXPORT_SYMBOL(__set_fs);
19144 +
19145 +void set_fs(mm_segment_t x)
19146 +{
19147 + current_thread_info()->addr_limit = x;
19148 + __set_fs(x);
19149 +}
19150 +EXPORT_SYMBOL(set_fs);
19151 +#endif
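The tail of this file shows that with CONFIG_PAX_MEMORY_UDEREF on i386, set_fs() is no longer just a store to addr_limit: the new __set_fs() also reloads %gs (NULL, __USER_DS or __KERNEL_DS depending on the new limit), which is what the "gs;"-prefixed __copyuser_seg accesses above rely on. Callers keep the usual pattern; a minimal usage sketch, kernel context assumed and function name illustrative:

static void kernel_ds_window_example(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* with UDEREF this also switches %gs to __KERNEL_DS */
	/* ... user-access helpers may temporarily operate on kernel buffers ... */
	set_fs(old_fs);		/* restore the previous limit and segment */
}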
19152 diff -urNp linux-3.0.7/arch/x86/lib/usercopy_64.c linux-3.0.7/arch/x86/lib/usercopy_64.c
19153 --- linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19154 +++ linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19155 @@ -42,6 +42,12 @@ long
19156 __strncpy_from_user(char *dst, const char __user *src, long count)
19157 {
19158 long res;
19159 +
19160 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19161 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19162 + src += PAX_USER_SHADOW_BASE;
19163 +#endif
19164 +
19165 __do_strncpy_from_user(dst, src, count, res);
19166 return res;
19167 }
19168 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19169 {
19170 long __d0;
19171 might_fault();
19172 +
19173 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19174 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19175 + addr += PAX_USER_SHADOW_BASE;
19176 +#endif
19177 +
19178 /* no memory constraint because it doesn't change any memory gcc knows
19179 about */
19180 asm volatile(
19181 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19182
19183 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19184 {
19185 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19186 - return copy_user_generic((__force void *)to, (__force void *)from, len);
19187 - }
19188 - return len;
19189 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19190 +
19191 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19192 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19193 + to += PAX_USER_SHADOW_BASE;
19194 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19195 + from += PAX_USER_SHADOW_BASE;
19196 +#endif
19197 +
19198 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19199 + }
19200 + return len;
19201 }
19202 EXPORT_SYMBOL(copy_in_user);
19203
19204 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19205 * it is not necessary to optimize tail handling.
19206 */
19207 unsigned long
19208 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19209 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19210 {
19211 char c;
19212 unsigned zero_len;
19213 diff -urNp linux-3.0.7/arch/x86/Makefile linux-3.0.7/arch/x86/Makefile
19214 --- linux-3.0.7/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19215 +++ linux-3.0.7/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19216 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19217 else
19218 BITS := 64
19219 UTS_MACHINE := x86_64
19220 + biarch := $(call cc-option,-m64)
19221 CHECKFLAGS += -D__x86_64__ -m64
19222
19223 KBUILD_AFLAGS += -m64
19224 @@ -195,3 +196,12 @@ define archhelp
19225 echo ' FDARGS="..." arguments for the booted kernel'
19226 echo ' FDINITRD=file initrd for the booted kernel'
19227 endef
19228 +
19229 +define OLD_LD
19230 +
19231 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19232 +*** Please upgrade your binutils to 2.18 or newer
19233 +endef
19234 +
19235 +archprepare:
19236 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19237 diff -urNp linux-3.0.7/arch/x86/mm/extable.c linux-3.0.7/arch/x86/mm/extable.c
19238 --- linux-3.0.7/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19239 +++ linux-3.0.7/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19240 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19241 const struct exception_table_entry *fixup;
19242
19243 #ifdef CONFIG_PNPBIOS
19244 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19245 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19246 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19247 extern u32 pnp_bios_is_utter_crap;
19248 pnp_bios_is_utter_crap = 1;
19249 diff -urNp linux-3.0.7/arch/x86/mm/fault.c linux-3.0.7/arch/x86/mm/fault.c
19250 --- linux-3.0.7/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19251 +++ linux-3.0.7/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19252 @@ -13,10 +13,18 @@
19253 #include <linux/perf_event.h> /* perf_sw_event */
19254 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19255 #include <linux/prefetch.h> /* prefetchw */
19256 +#include <linux/unistd.h>
19257 +#include <linux/compiler.h>
19258
19259 #include <asm/traps.h> /* dotraplinkage, ... */
19260 #include <asm/pgalloc.h> /* pgd_*(), ... */
19261 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19262 +#include <asm/vsyscall.h>
19263 +#include <asm/tlbflush.h>
19264 +
19265 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19266 +#include <asm/stacktrace.h>
19267 +#endif
19268
19269 /*
19270 * Page fault error code bits:
19271 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19272 int ret = 0;
19273
19274 /* kprobe_running() needs smp_processor_id() */
19275 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19276 + if (kprobes_built_in() && !user_mode(regs)) {
19277 preempt_disable();
19278 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19279 ret = 1;
19280 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19281 return !instr_lo || (instr_lo>>1) == 1;
19282 case 0x00:
19283 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19284 - if (probe_kernel_address(instr, opcode))
19285 + if (user_mode(regs)) {
19286 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19287 + return 0;
19288 + } else if (probe_kernel_address(instr, opcode))
19289 return 0;
19290
19291 *prefetch = (instr_lo == 0xF) &&
19292 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19293 while (instr < max_instr) {
19294 unsigned char opcode;
19295
19296 - if (probe_kernel_address(instr, opcode))
19297 + if (user_mode(regs)) {
19298 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19299 + break;
19300 + } else if (probe_kernel_address(instr, opcode))
19301 break;
19302
19303 instr++;
19304 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19305 force_sig_info(si_signo, &info, tsk);
19306 }
19307
19308 +#ifdef CONFIG_PAX_EMUTRAMP
19309 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19310 +#endif
19311 +
19312 +#ifdef CONFIG_PAX_PAGEEXEC
19313 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19314 +{
19315 + pgd_t *pgd;
19316 + pud_t *pud;
19317 + pmd_t *pmd;
19318 +
19319 + pgd = pgd_offset(mm, address);
19320 + if (!pgd_present(*pgd))
19321 + return NULL;
19322 + pud = pud_offset(pgd, address);
19323 + if (!pud_present(*pud))
19324 + return NULL;
19325 + pmd = pmd_offset(pud, address);
19326 + if (!pmd_present(*pmd))
19327 + return NULL;
19328 + return pmd;
19329 +}
19330 +#endif
19331 +
19332 DEFINE_SPINLOCK(pgd_lock);
19333 LIST_HEAD(pgd_list);
19334
19335 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19336 for (address = VMALLOC_START & PMD_MASK;
19337 address >= TASK_SIZE && address < FIXADDR_TOP;
19338 address += PMD_SIZE) {
19339 +
19340 +#ifdef CONFIG_PAX_PER_CPU_PGD
19341 + unsigned long cpu;
19342 +#else
19343 struct page *page;
19344 +#endif
19345
19346 spin_lock(&pgd_lock);
19347 +
19348 +#ifdef CONFIG_PAX_PER_CPU_PGD
19349 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19350 + pgd_t *pgd = get_cpu_pgd(cpu);
19351 + pmd_t *ret;
19352 +#else
19353 list_for_each_entry(page, &pgd_list, lru) {
19354 + pgd_t *pgd = page_address(page);
19355 spinlock_t *pgt_lock;
19356 pmd_t *ret;
19357
19358 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19359 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19360
19361 spin_lock(pgt_lock);
19362 - ret = vmalloc_sync_one(page_address(page), address);
19363 +#endif
19364 +
19365 + ret = vmalloc_sync_one(pgd, address);
19366 +
19367 +#ifndef CONFIG_PAX_PER_CPU_PGD
19368 spin_unlock(pgt_lock);
19369 +#endif
19370
19371 if (!ret)
19372 break;
19373 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19374 * an interrupt in the middle of a task switch..
19375 */
19376 pgd_paddr = read_cr3();
19377 +
19378 +#ifdef CONFIG_PAX_PER_CPU_PGD
19379 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19380 +#endif
19381 +
19382 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19383 if (!pmd_k)
19384 return -1;
19385 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19386 * happen within a race in page table update. In the later
19387 * case just flush:
19388 */
19389 +
19390 +#ifdef CONFIG_PAX_PER_CPU_PGD
19391 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19392 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19393 +#else
19394 pgd = pgd_offset(current->active_mm, address);
19395 +#endif
19396 +
19397 pgd_ref = pgd_offset_k(address);
19398 if (pgd_none(*pgd_ref))
19399 return -1;
19400 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19401 static int is_errata100(struct pt_regs *regs, unsigned long address)
19402 {
19403 #ifdef CONFIG_X86_64
19404 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19405 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19406 return 1;
19407 #endif
19408 return 0;
19409 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19410 }
19411
19412 static const char nx_warning[] = KERN_CRIT
19413 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19414 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19415
19416 static void
19417 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19418 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19419 if (!oops_may_print())
19420 return;
19421
19422 - if (error_code & PF_INSTR) {
19423 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19424 unsigned int level;
19425
19426 pte_t *pte = lookup_address(address, &level);
19427
19428 if (pte && pte_present(*pte) && !pte_exec(*pte))
19429 - printk(nx_warning, current_uid());
19430 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19431 + }
19432 +
19433 +#ifdef CONFIG_PAX_KERNEXEC
19434 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19435 + if (current->signal->curr_ip)
19436 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19437 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19438 + else
19439 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19440 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19441 }
19442 +#endif
19443
19444 printk(KERN_ALERT "BUG: unable to handle kernel ");
19445 if (address < PAGE_SIZE)
19446 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19447 unsigned long address, int si_code)
19448 {
19449 struct task_struct *tsk = current;
19450 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19451 + struct mm_struct *mm = tsk->mm;
19452 +#endif
19453 +
19454 +#ifdef CONFIG_X86_64
19455 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19456 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19457 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19458 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19459 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19460 + return;
19461 + }
19462 + }
19463 +#endif
19464 +
19465 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19466 + if (mm && (error_code & PF_USER)) {
19467 + unsigned long ip = regs->ip;
19468 +
19469 + if (v8086_mode(regs))
19470 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19471 +
19472 + /*
19473 + * It's possible to have interrupts off here:
19474 + */
19475 + local_irq_enable();
19476 +
19477 +#ifdef CONFIG_PAX_PAGEEXEC
19478 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19479 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19480 +
19481 +#ifdef CONFIG_PAX_EMUTRAMP
19482 + switch (pax_handle_fetch_fault(regs)) {
19483 + case 2:
19484 + return;
19485 + }
19486 +#endif
19487 +
19488 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19489 + do_group_exit(SIGKILL);
19490 + }
19491 +#endif
19492 +
19493 +#ifdef CONFIG_PAX_SEGMEXEC
19494 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19495 +
19496 +#ifdef CONFIG_PAX_EMUTRAMP
19497 + switch (pax_handle_fetch_fault(regs)) {
19498 + case 2:
19499 + return;
19500 + }
19501 +#endif
19502 +
19503 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19504 + do_group_exit(SIGKILL);
19505 + }
19506 +#endif
19507 +
19508 + }
19509 +#endif
19510
19511 /* User mode accesses just cause a SIGSEGV */
19512 if (error_code & PF_USER) {
19513 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19514 return 1;
19515 }
19516
19517 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19518 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19519 +{
19520 + pte_t *pte;
19521 + pmd_t *pmd;
19522 + spinlock_t *ptl;
19523 + unsigned char pte_mask;
19524 +
19525 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19526 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
19527 + return 0;
19528 +
19529 + /* PaX: it's our fault, let's handle it if we can */
19530 +
19531 + /* PaX: take a look at read faults before acquiring any locks */
19532 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19533 + /* instruction fetch attempt from a protected page in user mode */
19534 + up_read(&mm->mmap_sem);
19535 +
19536 +#ifdef CONFIG_PAX_EMUTRAMP
19537 + switch (pax_handle_fetch_fault(regs)) {
19538 + case 2:
19539 + return 1;
19540 + }
19541 +#endif
19542 +
19543 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19544 + do_group_exit(SIGKILL);
19545 + }
19546 +
19547 + pmd = pax_get_pmd(mm, address);
19548 + if (unlikely(!pmd))
19549 + return 0;
19550 +
19551 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19552 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19553 + pte_unmap_unlock(pte, ptl);
19554 + return 0;
19555 + }
19556 +
19557 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19558 + /* write attempt to a protected page in user mode */
19559 + pte_unmap_unlock(pte, ptl);
19560 + return 0;
19561 + }
19562 +
19563 +#ifdef CONFIG_SMP
19564 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19565 +#else
19566 + if (likely(address > get_limit(regs->cs)))
19567 +#endif
19568 + {
19569 + set_pte(pte, pte_mkread(*pte));
19570 + __flush_tlb_one(address);
19571 + pte_unmap_unlock(pte, ptl);
19572 + up_read(&mm->mmap_sem);
19573 + return 1;
19574 + }
19575 +
19576 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19577 +
19578 + /*
19579 + * PaX: fill DTLB with user rights and retry
19580 + */
19581 + __asm__ __volatile__ (
19582 + "orb %2,(%1)\n"
19583 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19584 +/*
19585 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19586 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19587 + * page fault when examined during a TLB load attempt. this is true not only
19588 + * for PTEs holding a non-present entry but also present entries that will
19589 + * raise a page fault (such as those set up by PaX, or the copy-on-write
19590 + * mechanism). in effect it means that we do *not* need to flush the TLBs
19591 + * for our target pages since their PTEs are simply not in the TLBs at all.
19592 +
19593 + * the best thing in omitting it is that we gain around 15-20% speed in the
19594 + * fast path of the page fault handler and can get rid of tracing since we
19595 + * can no longer flush unintended entries.
19596 + */
19597 + "invlpg (%0)\n"
19598 +#endif
19599 + __copyuser_seg"testb $0,(%0)\n"
19600 + "xorb %3,(%1)\n"
19601 + :
19602 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19603 + : "memory", "cc");
19604 + pte_unmap_unlock(pte, ptl);
19605 + up_read(&mm->mmap_sem);
19606 + return 1;
19607 +}
19608 +#endif
19609 +
19610 /*
19611 * Handle a spurious fault caused by a stale TLB entry.
19612 *
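The pax_handle_pageexec_fault() added above implements PAGEEXEC on 32-bit CPUs without hardware NX by exploiting the split ITLB/DTLB. A loose C paraphrase of its final inline asm (names illustrative; the real code also conditionally keeps an invlpg for 586-class CPUs and performs the data touch through __copyuser_seg):

static void pageexec_fill_dtlb(pte_t *pte, unsigned long address, unsigned char pte_mask)
{
	set_pte(pte, __pte(pte_val(*pte) | pte_mask));		/* orb: grant USER (+ACCESSED/DIRTY) */
	(void)*(volatile unsigned char *)address;		/* one data load: only the DTLB caches it */
	set_pte(pte, __pte(pte_val(*pte) ^ _PAGE_USER));	/* xorb: revoke the user bit again */
}

Subsequent data accesses hit the cached DTLB entry and proceed, while instruction fetches keep faulting because the ITLB never saw the temporary rights, which is exactly the non-executable-page semantics being emulated.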
19613 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19614 static inline int
19615 access_error(unsigned long error_code, struct vm_area_struct *vma)
19616 {
19617 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19618 + return 1;
19619 +
19620 if (error_code & PF_WRITE) {
19621 /* write, present and write, not present: */
19622 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19623 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19624 {
19625 struct vm_area_struct *vma;
19626 struct task_struct *tsk;
19627 - unsigned long address;
19628 struct mm_struct *mm;
19629 int fault;
19630 int write = error_code & PF_WRITE;
19631 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19632 (write ? FAULT_FLAG_WRITE : 0);
19633
19634 + /* Get the faulting address: */
19635 + unsigned long address = read_cr2();
19636 +
19637 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19638 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19639 + if (!search_exception_tables(regs->ip)) {
19640 + bad_area_nosemaphore(regs, error_code, address);
19641 + return;
19642 + }
19643 + if (address < PAX_USER_SHADOW_BASE) {
19644 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19645 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19646 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19647 + } else
19648 + address -= PAX_USER_SHADOW_BASE;
19649 + }
19650 +#endif
19651 +
19652 tsk = current;
19653 mm = tsk->mm;
19654
19655 - /* Get the faulting address: */
19656 - address = read_cr2();
19657 -
19658 /*
19659 * Detect and handle instructions that would cause a page fault for
19660 * both a tracked kernel page and a userspace page.
19661 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19662 * User-mode registers count as a user access even for any
19663 * potential system fault or CPU buglet:
19664 */
19665 - if (user_mode_vm(regs)) {
19666 + if (user_mode(regs)) {
19667 local_irq_enable();
19668 error_code |= PF_USER;
19669 } else {
19670 @@ -1103,6 +1351,11 @@ retry:
19671 might_sleep();
19672 }
19673
19674 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19675 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19676 + return;
19677 +#endif
19678 +
19679 vma = find_vma(mm, address);
19680 if (unlikely(!vma)) {
19681 bad_area(regs, error_code, address);
19682 @@ -1114,18 +1367,24 @@ retry:
19683 bad_area(regs, error_code, address);
19684 return;
19685 }
19686 - if (error_code & PF_USER) {
19687 - /*
19688 - * Accessing the stack below %sp is always a bug.
19689 - * The large cushion allows instructions like enter
19690 - * and pusha to work. ("enter $65535, $31" pushes
19691 - * 32 pointers and then decrements %sp by 65535.)
19692 - */
19693 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19694 - bad_area(regs, error_code, address);
19695 - return;
19696 - }
19697 + /*
19698 + * Accessing the stack below %sp is always a bug.
19699 + * The large cushion allows instructions like enter
19700 + * and pusha to work. ("enter $65535, $31" pushes
19701 + * 32 pointers and then decrements %sp by 65535.)
19702 + */
19703 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19704 + bad_area(regs, error_code, address);
19705 + return;
19706 }
19707 +
19708 +#ifdef CONFIG_PAX_SEGMEXEC
19709 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19710 + bad_area(regs, error_code, address);
19711 + return;
19712 + }
19713 +#endif
19714 +
19715 if (unlikely(expand_stack(vma, address))) {
19716 bad_area(regs, error_code, address);
19717 return;
19718 @@ -1180,3 +1439,199 @@ good_area:
19719
19720 up_read(&mm->mmap_sem);
19721 }
19722 +
19723 +#ifdef CONFIG_PAX_EMUTRAMP
19724 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19725 +{
19726 + int err;
19727 +
19728 + do { /* PaX: gcc trampoline emulation #1 */
19729 + unsigned char mov1, mov2;
19730 + unsigned short jmp;
19731 + unsigned int addr1, addr2;
19732 +
19733 +#ifdef CONFIG_X86_64
19734 + if ((regs->ip + 11) >> 32)
19735 + break;
19736 +#endif
19737 +
19738 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19739 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19740 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19741 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19742 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19743 +
19744 + if (err)
19745 + break;
19746 +
19747 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19748 + regs->cx = addr1;
19749 + regs->ax = addr2;
19750 + regs->ip = addr2;
19751 + return 2;
19752 + }
19753 + } while (0);
19754 +
19755 + do { /* PaX: gcc trampoline emulation #2 */
19756 + unsigned char mov, jmp;
19757 + unsigned int addr1, addr2;
19758 +
19759 +#ifdef CONFIG_X86_64
19760 + if ((regs->ip + 9) >> 32)
19761 + break;
19762 +#endif
19763 +
19764 + err = get_user(mov, (unsigned char __user *)regs->ip);
19765 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19766 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19767 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19768 +
19769 + if (err)
19770 + break;
19771 +
19772 + if (mov == 0xB9 && jmp == 0xE9) {
19773 + regs->cx = addr1;
19774 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19775 + return 2;
19776 + }
19777 + } while (0);
19778 +
19779 + return 1; /* PaX in action */
19780 +}
19781 +
19782 +#ifdef CONFIG_X86_64
19783 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19784 +{
19785 + int err;
19786 +
19787 + do { /* PaX: gcc trampoline emulation #1 */
19788 + unsigned short mov1, mov2, jmp1;
19789 + unsigned char jmp2;
19790 + unsigned int addr1;
19791 + unsigned long addr2;
19792 +
19793 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19794 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19795 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19796 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19797 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19798 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19799 +
19800 + if (err)
19801 + break;
19802 +
19803 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19804 + regs->r11 = addr1;
19805 + regs->r10 = addr2;
19806 + regs->ip = addr1;
19807 + return 2;
19808 + }
19809 + } while (0);
19810 +
19811 + do { /* PaX: gcc trampoline emulation #2 */
19812 + unsigned short mov1, mov2, jmp1;
19813 + unsigned char jmp2;
19814 + unsigned long addr1, addr2;
19815 +
19816 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19817 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19818 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19819 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19820 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19821 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19822 +
19823 + if (err)
19824 + break;
19825 +
19826 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19827 + regs->r11 = addr1;
19828 + regs->r10 = addr2;
19829 + regs->ip = addr1;
19830 + return 2;
19831 + }
19832 + } while (0);
19833 +
19834 + return 1; /* PaX in action */
19835 +}
19836 +#endif
19837 +
19838 +/*
19839 + * PaX: decide what to do with offenders (regs->ip = fault address)
19840 + *
19841 + * returns 1 when task should be killed
19842 + * 2 when gcc trampoline was detected
19843 + */
19844 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19845 +{
19846 + if (v8086_mode(regs))
19847 + return 1;
19848 +
19849 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19850 + return 1;
19851 +
19852 +#ifdef CONFIG_X86_32
19853 + return pax_handle_fetch_fault_32(regs);
19854 +#else
19855 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19856 + return pax_handle_fetch_fault_32(regs);
19857 + else
19858 + return pax_handle_fetch_fault_64(regs);
19859 +#endif
19860 +}
19861 +#endif
19862 +
19863 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19864 +void pax_report_insns(void *pc, void *sp)
19865 +{
19866 + long i;
19867 +
19868 + printk(KERN_ERR "PAX: bytes at PC: ");
19869 + for (i = 0; i < 20; i++) {
19870 + unsigned char c;
19871 + if (get_user(c, (unsigned char __force_user *)pc+i))
19872 + printk(KERN_CONT "?? ");
19873 + else
19874 + printk(KERN_CONT "%02x ", c);
19875 + }
19876 + printk("\n");
19877 +
19878 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19879 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19880 + unsigned long c;
19881 + if (get_user(c, (unsigned long __force_user *)sp+i))
19882 +#ifdef CONFIG_X86_32
19883 + printk(KERN_CONT "???????? ");
19884 +#else
19885 + printk(KERN_CONT "???????????????? ");
19886 +#endif
19887 + else
19888 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19889 + }
19890 + printk("\n");
19891 +}
19892 +#endif
19893 +
19894 +/**
19895 + * probe_kernel_write(): safely attempt to write to a location
19896 + * @dst: address to write to
19897 + * @src: pointer to the data that shall be written
19898 + * @size: size of the data chunk
19899 + *
19900 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19901 + * happens, handle that and return -EFAULT.
19902 + */
19903 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19904 +{
19905 + long ret;
19906 + mm_segment_t old_fs = get_fs();
19907 +
19908 + set_fs(KERNEL_DS);
19909 + pagefault_disable();
19910 + pax_open_kernel();
19911 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
19912 + pax_close_kernel();
19913 + pagefault_enable();
19914 + set_fs(old_fs);
19915 +
19916 + return ret ? -EFAULT : 0;
19917 +}
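
The EMUTRAMP recognizers added above match fixed instruction encodings at the faulting instruction pointer and emulate them instead of killing the task. As an illustration only (not part of the patch), the standalone sketch below models 32-bit trampoline pattern #1 from pax_handle_fetch_fault_32 -- "mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax", i.e. the byte sequence B9 .. .. .. .. B8 .. .. .. .. FF E0 -- against a plain buffer instead of get_user() reads from regs->ip; struct fake_regs and match_trampoline_1 are invented names for the example.

/* trampoline_match.c: standalone model of the 32-bit pattern #1 matcher */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fake_regs { uint32_t cx, ax, ip; };

/* Returns 2 (emulated) when the 12-byte window encodes the trampoline,
 * mirroring the return convention used by the PaX handlers above. */
static int match_trampoline_1(const unsigned char *insn, struct fake_regs *regs)
{
	uint32_t addr1, addr2;
	uint16_t jmp;

	if (insn[0] != 0xB9 || insn[5] != 0xB8)
		return 1;
	memcpy(&addr1, insn + 1, sizeof(addr1));	/* imm32 loaded into %ecx */
	memcpy(&addr2, insn + 6, sizeof(addr2));	/* imm32 loaded into %eax */
	memcpy(&jmp, insn + 10, sizeof(jmp));
	if (jmp != 0xE0FF)				/* jmp *%eax (little endian) */
		return 1;

	regs->cx = addr1;
	regs->ax = addr2;
	regs->ip = addr2;	/* control transfers to the target held in %eax */
	return 2;
}

int main(void)
{
	const unsigned char tramp[12] = {
		0xB9, 0x78, 0x56, 0x34, 0x12,	/* mov $0x12345678,%ecx */
		0xB8, 0x00, 0x10, 0x40, 0x00,	/* mov $0x00401000,%eax */
		0xFF, 0xE0			/* jmp *%eax */
	};
	struct fake_regs regs = { 0 };

	printf("match = %d, new ip = %#x\n",
	       match_trampoline_1(tramp, &regs), (unsigned)regs.ip);
	return 0;
}
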
19918 diff -urNp linux-3.0.7/arch/x86/mm/gup.c linux-3.0.7/arch/x86/mm/gup.c
19919 --- linux-3.0.7/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19920 +++ linux-3.0.7/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19921 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19922 addr = start;
19923 len = (unsigned long) nr_pages << PAGE_SHIFT;
19924 end = start + len;
19925 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19926 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19927 (void __user *)start, len)))
19928 return 0;
19929
19930 diff -urNp linux-3.0.7/arch/x86/mm/highmem_32.c linux-3.0.7/arch/x86/mm/highmem_32.c
19931 --- linux-3.0.7/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19932 +++ linux-3.0.7/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19933 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19934 idx = type + KM_TYPE_NR*smp_processor_id();
19935 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19936 BUG_ON(!pte_none(*(kmap_pte-idx)));
19937 +
19938 + pax_open_kernel();
19939 set_pte(kmap_pte-idx, mk_pte(page, prot));
19940 + pax_close_kernel();
19941
19942 return (void *)vaddr;
19943 }
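
A pattern that recurs throughout this patch is bracketing writes to otherwise write-protected kernel page tables (here the kmap PTE) with pax_open_kernel()/pax_close_kernel(), which temporarily lift the KERNEXEC protection for the duration of the store. The user-space sketch below is only an analogy for that idiom, not kernel code: it uses mprotect() to open a short write window into a page that is normally mapped read-only; ro_page and write_protected_store are invented names for the example.

/* open_write_close.c: user-space analogy of the open/write/close bracket */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void write_protected_store(unsigned char *ro_page, size_t off, unsigned char val)
{
	/* "pax_open_kernel()": drop the write protection just for this store */
	mprotect(ro_page, 4096, PROT_READ | PROT_WRITE);
	ro_page[off] = val;
	/* "pax_close_kernel()": restore the read-only mapping immediately afterwards */
	mprotect(ro_page, 4096, PROT_READ);
}

int main(void)
{
	unsigned char *ro_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ro_page == MAP_FAILED)
		return 1;
	memset(ro_page, 0, 4096);
	mprotect(ro_page, 4096, PROT_READ);	/* page is read-only from here on */

	write_protected_store(ro_page, 0, 0xAA);
	printf("byte 0 = %#x\n", ro_page[0]);
	return 0;
}
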
19944 diff -urNp linux-3.0.7/arch/x86/mm/hugetlbpage.c linux-3.0.7/arch/x86/mm/hugetlbpage.c
19945 --- linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19946 +++ linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19947 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19948 struct hstate *h = hstate_file(file);
19949 struct mm_struct *mm = current->mm;
19950 struct vm_area_struct *vma;
19951 - unsigned long start_addr;
19952 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19953 +
19954 +#ifdef CONFIG_PAX_SEGMEXEC
19955 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19956 + pax_task_size = SEGMEXEC_TASK_SIZE;
19957 +#endif
19958 +
19959 + pax_task_size -= PAGE_SIZE;
19960
19961 if (len > mm->cached_hole_size) {
19962 - start_addr = mm->free_area_cache;
19963 + start_addr = mm->free_area_cache;
19964 } else {
19965 - start_addr = TASK_UNMAPPED_BASE;
19966 - mm->cached_hole_size = 0;
19967 + start_addr = mm->mmap_base;
19968 + mm->cached_hole_size = 0;
19969 }
19970
19971 full_search:
19972 @@ -280,26 +287,27 @@ full_search:
19973
19974 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19975 /* At this point: (!vma || addr < vma->vm_end). */
19976 - if (TASK_SIZE - len < addr) {
19977 + if (pax_task_size - len < addr) {
19978 /*
19979 * Start a new search - just in case we missed
19980 * some holes.
19981 */
19982 - if (start_addr != TASK_UNMAPPED_BASE) {
19983 - start_addr = TASK_UNMAPPED_BASE;
19984 + if (start_addr != mm->mmap_base) {
19985 + start_addr = mm->mmap_base;
19986 mm->cached_hole_size = 0;
19987 goto full_search;
19988 }
19989 return -ENOMEM;
19990 }
19991 - if (!vma || addr + len <= vma->vm_start) {
19992 - mm->free_area_cache = addr + len;
19993 - return addr;
19994 - }
19995 + if (check_heap_stack_gap(vma, addr, len))
19996 + break;
19997 if (addr + mm->cached_hole_size < vma->vm_start)
19998 mm->cached_hole_size = vma->vm_start - addr;
19999 addr = ALIGN(vma->vm_end, huge_page_size(h));
20000 }
20001 +
20002 + mm->free_area_cache = addr + len;
20003 + return addr;
20004 }
20005
20006 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20007 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20008 {
20009 struct hstate *h = hstate_file(file);
20010 struct mm_struct *mm = current->mm;
20011 - struct vm_area_struct *vma, *prev_vma;
20012 - unsigned long base = mm->mmap_base, addr = addr0;
20013 + struct vm_area_struct *vma;
20014 + unsigned long base = mm->mmap_base, addr;
20015 unsigned long largest_hole = mm->cached_hole_size;
20016 - int first_time = 1;
20017
20018 /* don't allow allocations above current base */
20019 if (mm->free_area_cache > base)
20020 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20021 largest_hole = 0;
20022 mm->free_area_cache = base;
20023 }
20024 -try_again:
20025 +
20026 /* make sure it can fit in the remaining address space */
20027 if (mm->free_area_cache < len)
20028 goto fail;
20029
20030 /* either no address requested or can't fit in requested address hole */
20031 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20032 + addr = (mm->free_area_cache - len);
20033 do {
20034 + addr &= huge_page_mask(h);
20035 + vma = find_vma(mm, addr);
20036 /*
20037 * Lookup failure means no vma is above this address,
20038 * i.e. return with success:
20039 - */
20040 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20041 - return addr;
20042 -
20043 - /*
20044 * new region fits between prev_vma->vm_end and
20045 * vma->vm_start, use it:
20046 */
20047 - if (addr + len <= vma->vm_start &&
20048 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20049 + if (check_heap_stack_gap(vma, addr, len)) {
20050 /* remember the address as a hint for next time */
20051 - mm->cached_hole_size = largest_hole;
20052 - return (mm->free_area_cache = addr);
20053 - } else {
20054 - /* pull free_area_cache down to the first hole */
20055 - if (mm->free_area_cache == vma->vm_end) {
20056 - mm->free_area_cache = vma->vm_start;
20057 - mm->cached_hole_size = largest_hole;
20058 - }
20059 + mm->cached_hole_size = largest_hole;
20060 + return (mm->free_area_cache = addr);
20061 + }
20062 + /* pull free_area_cache down to the first hole */
20063 + if (mm->free_area_cache == vma->vm_end) {
20064 + mm->free_area_cache = vma->vm_start;
20065 + mm->cached_hole_size = largest_hole;
20066 }
20067
20068 /* remember the largest hole we saw so far */
20069 if (addr + largest_hole < vma->vm_start)
20070 - largest_hole = vma->vm_start - addr;
20071 + largest_hole = vma->vm_start - addr;
20072
20073 /* try just below the current vma->vm_start */
20074 - addr = (vma->vm_start - len) & huge_page_mask(h);
20075 - } while (len <= vma->vm_start);
20076 + addr = skip_heap_stack_gap(vma, len);
20077 + } while (!IS_ERR_VALUE(addr));
20078
20079 fail:
20080 /*
20081 - * if hint left us with no space for the requested
20082 - * mapping then try again:
20083 - */
20084 - if (first_time) {
20085 - mm->free_area_cache = base;
20086 - largest_hole = 0;
20087 - first_time = 0;
20088 - goto try_again;
20089 - }
20090 - /*
20091 * A failed mmap() very likely causes application failure,
20092 * so fall back to the bottom-up function here. This scenario
20093 * can happen with large stack limits and large mmap()
20094 * allocations.
20095 */
20096 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20097 +
20098 +#ifdef CONFIG_PAX_SEGMEXEC
20099 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20100 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20101 + else
20102 +#endif
20103 +
20104 + mm->mmap_base = TASK_UNMAPPED_BASE;
20105 +
20106 +#ifdef CONFIG_PAX_RANDMMAP
20107 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20108 + mm->mmap_base += mm->delta_mmap;
20109 +#endif
20110 +
20111 + mm->free_area_cache = mm->mmap_base;
20112 mm->cached_hole_size = ~0UL;
20113 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20114 len, pgoff, flags);
20115 @@ -386,6 +392,7 @@ fail:
20116 /*
20117 * Restore the topdown base:
20118 */
20119 + mm->mmap_base = base;
20120 mm->free_area_cache = base;
20121 mm->cached_hole_size = ~0UL;
20122
20123 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20124 struct hstate *h = hstate_file(file);
20125 struct mm_struct *mm = current->mm;
20126 struct vm_area_struct *vma;
20127 + unsigned long pax_task_size = TASK_SIZE;
20128
20129 if (len & ~huge_page_mask(h))
20130 return -EINVAL;
20131 - if (len > TASK_SIZE)
20132 +
20133 +#ifdef CONFIG_PAX_SEGMEXEC
20134 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20135 + pax_task_size = SEGMEXEC_TASK_SIZE;
20136 +#endif
20137 +
20138 + pax_task_size -= PAGE_SIZE;
20139 +
20140 + if (len > pax_task_size)
20141 return -ENOMEM;
20142
20143 if (flags & MAP_FIXED) {
20144 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20145 if (addr) {
20146 addr = ALIGN(addr, huge_page_size(h));
20147 vma = find_vma(mm, addr);
20148 - if (TASK_SIZE - len >= addr &&
20149 - (!vma || addr + len <= vma->vm_start))
20150 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20151 return addr;
20152 }
20153 if (mm->get_unmapped_area == arch_get_unmapped_area)
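
Several hunks in this file (and in osf_sys.c earlier) replace the open-coded test "!vma || addr + len <= vma->vm_start" with check_heap_stack_gap(). The real helper is defined elsewhere in this patch; the sketch below is only a conceptual model of what such a check adds over the old test -- rejecting candidate ranges that would butt up against a downward-growing stack vma without leaving a guard gap. The struct fields and the gap constant are assumptions for the example, not the patch's definitions.

/* gap_check.c: conceptual model of a heap/stack gap check */
#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP (64UL << 10)	/* illustrative 64 KiB guard, not the real value */
#define VM_GROWSDOWN 0x0100

struct vma_model {
	unsigned long vm_start, vm_end, vm_flags;
};

static bool gap_ok(const struct vma_model *next, unsigned long addr, unsigned long len)
{
	if (!next)				/* nothing above: always fits */
		return true;
	if (next->vm_flags & VM_GROWSDOWN)	/* keep a guard gap below a stack */
		return addr + len + GUARD_GAP <= next->vm_start;
	return addr + len <= next->vm_start;	/* old behaviour for ordinary vmas */
}

int main(void)
{
	struct vma_model stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x10000));	/* 0: no gap left */
	printf("%d\n", gap_ok(&stack, 0x7efffff00000UL, 0x10000));	/* 1: gap preserved */
	return 0;
}
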
20154 diff -urNp linux-3.0.7/arch/x86/mm/init_32.c linux-3.0.7/arch/x86/mm/init_32.c
20155 --- linux-3.0.7/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20156 +++ linux-3.0.7/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20157 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20158 }
20159
20160 /*
20161 - * Creates a middle page table and puts a pointer to it in the
20162 - * given global directory entry. This only returns the gd entry
20163 - * in non-PAE compilation mode, since the middle layer is folded.
20164 - */
20165 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20166 -{
20167 - pud_t *pud;
20168 - pmd_t *pmd_table;
20169 -
20170 -#ifdef CONFIG_X86_PAE
20171 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20172 - if (after_bootmem)
20173 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20174 - else
20175 - pmd_table = (pmd_t *)alloc_low_page();
20176 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20177 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20178 - pud = pud_offset(pgd, 0);
20179 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20180 -
20181 - return pmd_table;
20182 - }
20183 -#endif
20184 - pud = pud_offset(pgd, 0);
20185 - pmd_table = pmd_offset(pud, 0);
20186 -
20187 - return pmd_table;
20188 -}
20189 -
20190 -/*
20191 * Create a page table and place a pointer to it in a middle page
20192 * directory entry:
20193 */
20194 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20195 page_table = (pte_t *)alloc_low_page();
20196
20197 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20198 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20199 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20200 +#else
20201 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20202 +#endif
20203 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20204 }
20205
20206 return pte_offset_kernel(pmd, 0);
20207 }
20208
20209 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20210 +{
20211 + pud_t *pud;
20212 + pmd_t *pmd_table;
20213 +
20214 + pud = pud_offset(pgd, 0);
20215 + pmd_table = pmd_offset(pud, 0);
20216 +
20217 + return pmd_table;
20218 +}
20219 +
20220 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20221 {
20222 int pgd_idx = pgd_index(vaddr);
20223 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20224 int pgd_idx, pmd_idx;
20225 unsigned long vaddr;
20226 pgd_t *pgd;
20227 + pud_t *pud;
20228 pmd_t *pmd;
20229 pte_t *pte = NULL;
20230
20231 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20232 pgd = pgd_base + pgd_idx;
20233
20234 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20235 - pmd = one_md_table_init(pgd);
20236 - pmd = pmd + pmd_index(vaddr);
20237 + pud = pud_offset(pgd, vaddr);
20238 + pmd = pmd_offset(pud, vaddr);
20239 +
20240 +#ifdef CONFIG_X86_PAE
20241 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20242 +#endif
20243 +
20244 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20245 pmd++, pmd_idx++) {
20246 pte = page_table_kmap_check(one_page_table_init(pmd),
20247 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20248 }
20249 }
20250
20251 -static inline int is_kernel_text(unsigned long addr)
20252 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20253 {
20254 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20255 - return 1;
20256 - return 0;
20257 + if ((start > ktla_ktva((unsigned long)_etext) ||
20258 + end <= ktla_ktva((unsigned long)_stext)) &&
20259 + (start > ktla_ktva((unsigned long)_einittext) ||
20260 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20261 +
20262 +#ifdef CONFIG_ACPI_SLEEP
20263 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20264 +#endif
20265 +
20266 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20267 + return 0;
20268 + return 1;
20269 }
20270
20271 /*
20272 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20273 unsigned long last_map_addr = end;
20274 unsigned long start_pfn, end_pfn;
20275 pgd_t *pgd_base = swapper_pg_dir;
20276 - int pgd_idx, pmd_idx, pte_ofs;
20277 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20278 unsigned long pfn;
20279 pgd_t *pgd;
20280 + pud_t *pud;
20281 pmd_t *pmd;
20282 pte_t *pte;
20283 unsigned pages_2m, pages_4k;
20284 @@ -281,8 +282,13 @@ repeat:
20285 pfn = start_pfn;
20286 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20287 pgd = pgd_base + pgd_idx;
20288 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20289 - pmd = one_md_table_init(pgd);
20290 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20291 + pud = pud_offset(pgd, 0);
20292 + pmd = pmd_offset(pud, 0);
20293 +
20294 +#ifdef CONFIG_X86_PAE
20295 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20296 +#endif
20297
20298 if (pfn >= end_pfn)
20299 continue;
20300 @@ -294,14 +300,13 @@ repeat:
20301 #endif
20302 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20303 pmd++, pmd_idx++) {
20304 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20305 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20306
20307 /*
20308 * Map with big pages if possible, otherwise
20309 * create normal page tables:
20310 */
20311 if (use_pse) {
20312 - unsigned int addr2;
20313 pgprot_t prot = PAGE_KERNEL_LARGE;
20314 /*
20315 * first pass will use the same initial
20316 @@ -311,11 +316,7 @@ repeat:
20317 __pgprot(PTE_IDENT_ATTR |
20318 _PAGE_PSE);
20319
20320 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20321 - PAGE_OFFSET + PAGE_SIZE-1;
20322 -
20323 - if (is_kernel_text(addr) ||
20324 - is_kernel_text(addr2))
20325 + if (is_kernel_text(address, address + PMD_SIZE))
20326 prot = PAGE_KERNEL_LARGE_EXEC;
20327
20328 pages_2m++;
20329 @@ -332,7 +333,7 @@ repeat:
20330 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20331 pte += pte_ofs;
20332 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20333 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20334 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20335 pgprot_t prot = PAGE_KERNEL;
20336 /*
20337 * first pass will use the same initial
20338 @@ -340,7 +341,7 @@ repeat:
20339 */
20340 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20341
20342 - if (is_kernel_text(addr))
20343 + if (is_kernel_text(address, address + PAGE_SIZE))
20344 prot = PAGE_KERNEL_EXEC;
20345
20346 pages_4k++;
20347 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20348
20349 pud = pud_offset(pgd, va);
20350 pmd = pmd_offset(pud, va);
20351 - if (!pmd_present(*pmd))
20352 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20353 break;
20354
20355 pte = pte_offset_kernel(pmd, va);
20356 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20357
20358 static void __init pagetable_init(void)
20359 {
20360 - pgd_t *pgd_base = swapper_pg_dir;
20361 -
20362 - permanent_kmaps_init(pgd_base);
20363 + permanent_kmaps_init(swapper_pg_dir);
20364 }
20365
20366 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20367 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20368 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20369
20370 /* user-defined highmem size */
20371 @@ -757,6 +756,12 @@ void __init mem_init(void)
20372
20373 pci_iommu_alloc();
20374
20375 +#ifdef CONFIG_PAX_PER_CPU_PGD
20376 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20377 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20378 + KERNEL_PGD_PTRS);
20379 +#endif
20380 +
20381 #ifdef CONFIG_FLATMEM
20382 BUG_ON(!mem_map);
20383 #endif
20384 @@ -774,7 +779,7 @@ void __init mem_init(void)
20385 set_highmem_pages_init();
20386
20387 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20388 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20389 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20390 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20391
20392 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20393 @@ -815,10 +820,10 @@ void __init mem_init(void)
20394 ((unsigned long)&__init_end -
20395 (unsigned long)&__init_begin) >> 10,
20396
20397 - (unsigned long)&_etext, (unsigned long)&_edata,
20398 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20399 + (unsigned long)&_sdata, (unsigned long)&_edata,
20400 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20401
20402 - (unsigned long)&_text, (unsigned long)&_etext,
20403 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20404 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20405
20406 /*
20407 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20408 if (!kernel_set_to_readonly)
20409 return;
20410
20411 + start = ktla_ktva(start);
20412 pr_debug("Set kernel text: %lx - %lx for read write\n",
20413 start, start+size);
20414
20415 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20416 if (!kernel_set_to_readonly)
20417 return;
20418
20419 + start = ktla_ktva(start);
20420 pr_debug("Set kernel text: %lx - %lx for read only\n",
20421 start, start+size);
20422
20423 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20424 unsigned long start = PFN_ALIGN(_text);
20425 unsigned long size = PFN_ALIGN(_etext) - start;
20426
20427 + start = ktla_ktva(start);
20428 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20429 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20430 size >> 10);
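
The rewritten is_kernel_text() above now takes a [start, end) range and reports whether it overlaps any of several protected regions (kernel text, init text, the ACPI wakeup trampoline, the low BIOS window) instead of testing a single address. The standalone sketch below just restates that interval-overlap logic in isolation; the region values are placeholders, not the kernel's actual symbols.

/* range_overlap.c: the interval test behind the new is_kernel_text() */
#include <stdbool.h>
#include <stdio.h>

struct region { unsigned long lo, hi; };	/* half-open [lo, hi) */

static bool overlaps(unsigned long start, unsigned long end, struct region r)
{
	return !(start >= r.hi || end <= r.lo);
}

static bool is_protected(unsigned long start, unsigned long end,
			 const struct region *regions, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (overlaps(start, end, regions[i]))
			return true;	/* any overlap keeps the protective mapping */
	return false;
}

int main(void)
{
	/* placeholder layout: a "text" range and a low "BIOS" window */
	struct region regions[] = {
		{ 0xc1000000UL, 0xc1400000UL },
		{ 0xc00c0000UL, 0xc0100000UL },
	};

	printf("%d\n", is_protected(0xc1200000UL, 0xc1201000UL, regions, 2));	/* 1 */
	printf("%d\n", is_protected(0xc2000000UL, 0xc2001000UL, regions, 2));	/* 0 */
	return 0;
}
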
20431 diff -urNp linux-3.0.7/arch/x86/mm/init_64.c linux-3.0.7/arch/x86/mm/init_64.c
20432 --- linux-3.0.7/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20433 +++ linux-3.0.7/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20434 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20435 * around without checking the pgd every time.
20436 */
20437
20438 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20439 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20440 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20441
20442 int force_personality32;
20443 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20444
20445 for (address = start; address <= end; address += PGDIR_SIZE) {
20446 const pgd_t *pgd_ref = pgd_offset_k(address);
20447 +
20448 +#ifdef CONFIG_PAX_PER_CPU_PGD
20449 + unsigned long cpu;
20450 +#else
20451 struct page *page;
20452 +#endif
20453
20454 if (pgd_none(*pgd_ref))
20455 continue;
20456
20457 spin_lock(&pgd_lock);
20458 +
20459 +#ifdef CONFIG_PAX_PER_CPU_PGD
20460 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20461 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20462 +#else
20463 list_for_each_entry(page, &pgd_list, lru) {
20464 pgd_t *pgd;
20465 spinlock_t *pgt_lock;
20466 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20467 /* the pgt_lock only for Xen */
20468 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20469 spin_lock(pgt_lock);
20470 +#endif
20471
20472 if (pgd_none(*pgd))
20473 set_pgd(pgd, *pgd_ref);
20474 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20475 BUG_ON(pgd_page_vaddr(*pgd)
20476 != pgd_page_vaddr(*pgd_ref));
20477
20478 +#ifndef CONFIG_PAX_PER_CPU_PGD
20479 spin_unlock(pgt_lock);
20480 +#endif
20481 +
20482 }
20483 spin_unlock(&pgd_lock);
20484 }
20485 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20486 pmd = fill_pmd(pud, vaddr);
20487 pte = fill_pte(pmd, vaddr);
20488
20489 + pax_open_kernel();
20490 set_pte(pte, new_pte);
20491 + pax_close_kernel();
20492
20493 /*
20494 * It's enough to flush this one mapping.
20495 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20496 pgd = pgd_offset_k((unsigned long)__va(phys));
20497 if (pgd_none(*pgd)) {
20498 pud = (pud_t *) spp_getpage();
20499 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20500 - _PAGE_USER));
20501 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20502 }
20503 pud = pud_offset(pgd, (unsigned long)__va(phys));
20504 if (pud_none(*pud)) {
20505 pmd = (pmd_t *) spp_getpage();
20506 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20507 - _PAGE_USER));
20508 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20509 }
20510 pmd = pmd_offset(pud, phys);
20511 BUG_ON(!pmd_none(*pmd));
20512 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20513 if (pfn >= pgt_buf_top)
20514 panic("alloc_low_page: ran out of memory");
20515
20516 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20517 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20518 clear_page(adr);
20519 *phys = pfn * PAGE_SIZE;
20520 return adr;
20521 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20522
20523 phys = __pa(virt);
20524 left = phys & (PAGE_SIZE - 1);
20525 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20526 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20527 adr = (void *)(((unsigned long)adr) | left);
20528
20529 return adr;
20530 @@ -693,6 +707,12 @@ void __init mem_init(void)
20531
20532 pci_iommu_alloc();
20533
20534 +#ifdef CONFIG_PAX_PER_CPU_PGD
20535 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20536 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20537 + KERNEL_PGD_PTRS);
20538 +#endif
20539 +
20540 /* clear_bss() already clear the empty_zero_page */
20541
20542 reservedpages = 0;
20543 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20544 static struct vm_area_struct gate_vma = {
20545 .vm_start = VSYSCALL_START,
20546 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20547 - .vm_page_prot = PAGE_READONLY_EXEC,
20548 - .vm_flags = VM_READ | VM_EXEC
20549 + .vm_page_prot = PAGE_READONLY,
20550 + .vm_flags = VM_READ
20551 };
20552
20553 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20554 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20555
20556 const char *arch_vma_name(struct vm_area_struct *vma)
20557 {
20558 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20559 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20560 return "[vdso]";
20561 if (vma == &gate_vma)
20562 return "[vsyscall]";
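
With CONFIG_PAX_PER_CPU_PGD each CPU carries its own top-level page table, so kernel entries have to be mirrored into every per-CPU copy -- that is what the clone_pgd_range() call added to mem_init() and the per-CPU loop added to sync_global_pgds() do. The sketch below models that mirroring with plain arrays; NR_CPUS_MODEL, KERNEL_SPLIT and the array names are invented for the illustration, not kernel definitions.

/* pgd_mirror.c: toy model of keeping per-CPU top-level tables in sync */
#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD_MODEL 512
#define KERNEL_SPLIT       256	/* entries >= this index map kernel space */
#define NR_CPUS_MODEL      4

static unsigned long reference_pgd[PTRS_PER_PGD_MODEL];
static unsigned long cpu_pgd[NR_CPUS_MODEL][PTRS_PER_PGD_MODEL];

/* mirror the kernel half of the reference table into every per-CPU table */
static void sync_kernel_entries(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		memcpy(&cpu_pgd[cpu][KERNEL_SPLIT],
		       &reference_pgd[KERNEL_SPLIT],
		       (PTRS_PER_PGD_MODEL - KERNEL_SPLIT) * sizeof(unsigned long));
}

int main(void)
{
	reference_pgd[KERNEL_SPLIT] = 0xdeadbeefUL;	/* pretend a kernel mapping appeared */
	sync_kernel_entries();
	printf("cpu3 kernel entry = %#lx\n", cpu_pgd[3][KERNEL_SPLIT]);
	return 0;
}
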
20563 diff -urNp linux-3.0.7/arch/x86/mm/init.c linux-3.0.7/arch/x86/mm/init.c
20564 --- linux-3.0.7/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20565 +++ linux-3.0.7/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20566 @@ -31,7 +31,7 @@ int direct_gbpages
20567 static void __init find_early_table_space(unsigned long end, int use_pse,
20568 int use_gbpages)
20569 {
20570 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20571 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20572 phys_addr_t base;
20573
20574 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20575 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20576 */
20577 int devmem_is_allowed(unsigned long pagenr)
20578 {
20579 - if (pagenr <= 256)
20580 +#ifdef CONFIG_GRKERNSEC_KMEM
20581 + /* allow BDA */
20582 + if (!pagenr)
20583 + return 1;
20584 + /* allow EBDA */
20585 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20586 + return 1;
20587 +#else
20588 + if (!pagenr)
20589 + return 1;
20590 +#ifdef CONFIG_VM86
20591 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20592 + return 1;
20593 +#endif
20594 +#endif
20595 +
20596 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20597 return 1;
20598 +#ifdef CONFIG_GRKERNSEC_KMEM
20599 + /* throw out everything else below 1MB */
20600 + if (pagenr <= 256)
20601 + return 0;
20602 +#endif
20603 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20604 return 0;
20605 if (!page_is_ram(pagenr))
20606 return 1;
20607 +
20608 return 0;
20609 }
20610
20611 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20612
20613 void free_initmem(void)
20614 {
20615 +
20616 +#ifdef CONFIG_PAX_KERNEXEC
20617 +#ifdef CONFIG_X86_32
20618 + /* PaX: limit KERNEL_CS to actual size */
20619 + unsigned long addr, limit;
20620 + struct desc_struct d;
20621 + int cpu;
20622 +
20623 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20624 + limit = (limit - 1UL) >> PAGE_SHIFT;
20625 +
20626 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20627 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20628 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20629 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20630 + }
20631 +
20632 + /* PaX: make KERNEL_CS read-only */
20633 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20634 + if (!paravirt_enabled())
20635 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20636 +/*
20637 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20638 + pgd = pgd_offset_k(addr);
20639 + pud = pud_offset(pgd, addr);
20640 + pmd = pmd_offset(pud, addr);
20641 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20642 + }
20643 +*/
20644 +#ifdef CONFIG_X86_PAE
20645 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20646 +/*
20647 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20648 + pgd = pgd_offset_k(addr);
20649 + pud = pud_offset(pgd, addr);
20650 + pmd = pmd_offset(pud, addr);
20651 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20652 + }
20653 +*/
20654 +#endif
20655 +
20656 +#ifdef CONFIG_MODULES
20657 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20658 +#endif
20659 +
20660 +#else
20661 + pgd_t *pgd;
20662 + pud_t *pud;
20663 + pmd_t *pmd;
20664 + unsigned long addr, end;
20665 +
20666 + /* PaX: make kernel code/rodata read-only, rest non-executable */
20667 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20668 + pgd = pgd_offset_k(addr);
20669 + pud = pud_offset(pgd, addr);
20670 + pmd = pmd_offset(pud, addr);
20671 + if (!pmd_present(*pmd))
20672 + continue;
20673 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20674 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20675 + else
20676 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20677 + }
20678 +
20679 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20680 + end = addr + KERNEL_IMAGE_SIZE;
20681 + for (; addr < end; addr += PMD_SIZE) {
20682 + pgd = pgd_offset_k(addr);
20683 + pud = pud_offset(pgd, addr);
20684 + pmd = pmd_offset(pud, addr);
20685 + if (!pmd_present(*pmd))
20686 + continue;
20687 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20688 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20689 + }
20690 +#endif
20691 +
20692 + flush_tlb_all();
20693 +#endif
20694 +
20695 free_init_pages("unused kernel memory",
20696 (unsigned long)(&__init_begin),
20697 (unsigned long)(&__init_end));
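
The new devmem_is_allowed() above layers a grsecurity policy on top of the stock check: with GRKERNSEC_KMEM only the BDA page and the EBDA page survive below 1 MiB, while the ISA hole stays accessible. The sketch below restates just that decision order as a standalone function so the fall-through is easier to follow; page_is_ram() and iomem_is_exclusive() are stubbed out, and the constants mirror the values used in the hunk.

/* devmem_policy.c: isolated restatement of the GRKERNSEC_KMEM branch */
#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START  (0xa0000UL >> PAGE_SHIFT)
#define ISA_END    (0x100000UL >> PAGE_SHIFT)
#define EBDA_PAGE  (0x9f000UL >> PAGE_SHIFT)

static int page_is_ram(unsigned long pagenr)        { (void)pagenr; return 1; }	/* stub */
static int iomem_is_exclusive(unsigned long addr)   { (void)addr;   return 0; }	/* stub */

static int devmem_allowed_kmem(unsigned long pagenr)
{
	if (!pagenr)					/* BDA */
		return 1;
	if (pagenr == EBDA_PAGE)			/* EBDA */
		return 1;
	if (ISA_START <= pagenr && pagenr < ISA_END)	/* ISA hole */
		return 1;
	if (pagenr <= 256)				/* everything else below 1 MiB */
		return 0;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

int main(void)
{
	printf("page 0: %d, page 1: %d, VGA page: %d\n",
	       devmem_allowed_kmem(0), devmem_allowed_kmem(1),
	       devmem_allowed_kmem(0xa0000UL >> PAGE_SHIFT));
	return 0;
}
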
20698 diff -urNp linux-3.0.7/arch/x86/mm/iomap_32.c linux-3.0.7/arch/x86/mm/iomap_32.c
20699 --- linux-3.0.7/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20700 +++ linux-3.0.7/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20701 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20702 type = kmap_atomic_idx_push();
20703 idx = type + KM_TYPE_NR * smp_processor_id();
20704 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20705 +
20706 + pax_open_kernel();
20707 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20708 + pax_close_kernel();
20709 +
20710 arch_flush_lazy_mmu_mode();
20711
20712 return (void *)vaddr;
20713 diff -urNp linux-3.0.7/arch/x86/mm/ioremap.c linux-3.0.7/arch/x86/mm/ioremap.c
20714 --- linux-3.0.7/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20715 +++ linux-3.0.7/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20716 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20717 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20718 int is_ram = page_is_ram(pfn);
20719
20720 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20721 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20722 return NULL;
20723 WARN_ON_ONCE(is_ram);
20724 }
20725 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20726 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20727
20728 static __initdata int after_paging_init;
20729 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20730 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20731
20732 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20733 {
20734 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20735 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20736
20737 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20738 - memset(bm_pte, 0, sizeof(bm_pte));
20739 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20740 + pmd_populate_user(&init_mm, pmd, bm_pte);
20741
20742 /*
20743 * The boot-ioremap range spans multiple pmds, for which
20744 diff -urNp linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c
20745 --- linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20746 +++ linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20747 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20748 * memory (e.g. tracked pages)? For now, we need this to avoid
20749 * invoking kmemcheck for PnP BIOS calls.
20750 */
20751 - if (regs->flags & X86_VM_MASK)
20752 + if (v8086_mode(regs))
20753 return false;
20754 - if (regs->cs != __KERNEL_CS)
20755 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20756 return false;
20757
20758 pte = kmemcheck_pte_lookup(address);
20759 diff -urNp linux-3.0.7/arch/x86/mm/mmap.c linux-3.0.7/arch/x86/mm/mmap.c
20760 --- linux-3.0.7/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20761 +++ linux-3.0.7/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20762 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20763 * Leave an at least ~128 MB hole with possible stack randomization.
20764 */
20765 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20766 -#define MAX_GAP (TASK_SIZE/6*5)
20767 +#define MAX_GAP (pax_task_size/6*5)
20768
20769 /*
20770 * True on X86_32 or when emulating IA32 on X86_64
20771 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20772 return rnd << PAGE_SHIFT;
20773 }
20774
20775 -static unsigned long mmap_base(void)
20776 +static unsigned long mmap_base(struct mm_struct *mm)
20777 {
20778 unsigned long gap = rlimit(RLIMIT_STACK);
20779 + unsigned long pax_task_size = TASK_SIZE;
20780 +
20781 +#ifdef CONFIG_PAX_SEGMEXEC
20782 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20783 + pax_task_size = SEGMEXEC_TASK_SIZE;
20784 +#endif
20785
20786 if (gap < MIN_GAP)
20787 gap = MIN_GAP;
20788 else if (gap > MAX_GAP)
20789 gap = MAX_GAP;
20790
20791 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20792 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20793 }
20794
20795 /*
20796 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20797 * does, but not when emulating X86_32
20798 */
20799 -static unsigned long mmap_legacy_base(void)
20800 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20801 {
20802 - if (mmap_is_ia32())
20803 + if (mmap_is_ia32()) {
20804 +
20805 +#ifdef CONFIG_PAX_SEGMEXEC
20806 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20807 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20808 + else
20809 +#endif
20810 +
20811 return TASK_UNMAPPED_BASE;
20812 - else
20813 + } else
20814 return TASK_UNMAPPED_BASE + mmap_rnd();
20815 }
20816
20817 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20818 void arch_pick_mmap_layout(struct mm_struct *mm)
20819 {
20820 if (mmap_is_legacy()) {
20821 - mm->mmap_base = mmap_legacy_base();
20822 + mm->mmap_base = mmap_legacy_base(mm);
20823 +
20824 +#ifdef CONFIG_PAX_RANDMMAP
20825 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20826 + mm->mmap_base += mm->delta_mmap;
20827 +#endif
20828 +
20829 mm->get_unmapped_area = arch_get_unmapped_area;
20830 mm->unmap_area = arch_unmap_area;
20831 } else {
20832 - mm->mmap_base = mmap_base();
20833 + mm->mmap_base = mmap_base(mm);
20834 +
20835 +#ifdef CONFIG_PAX_RANDMMAP
20836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20837 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20838 +#endif
20839 +
20840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20841 mm->unmap_area = arch_unmap_area_topdown;
20842 }
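
arch_pick_mmap_layout() above now clamps the stack gap between MIN_GAP and MAX_GAP, subtracts it (plus the mmap randomisation) from the task size, and then applies the PaX delta_mmap/delta_stack shifts on top. The arithmetic is easy to lose in the #ifdefs, so the sketch below redoes it with plain numbers; the constants (3 GiB task size, 128 MiB minimum gap, fixed "random" values) are illustrative stand-ins only.

/* mmap_base_calc.c: the top-down mmap base arithmetic in isolation */
#include <stdio.h>

#define PAGE_ALIGN_M(x) (((x) + 0xfffUL) & ~0xfffUL)	/* round up to a page */
#define TASK_SIZE_M     0xc0000000UL			/* 3 GiB, i386-style split */
#define MIN_GAP_M       (128UL << 20)
#define MAX_GAP_M       (TASK_SIZE_M / 6 * 5)

static unsigned long mmap_base_model(unsigned long stack_rlimit,
				     unsigned long rnd,
				     unsigned long delta_mmap,
				     unsigned long delta_stack)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP_M)
		gap = MIN_GAP_M;
	else if (gap > MAX_GAP_M)
		gap = MAX_GAP_M;

	/* top-down base, then pulled further down by the PaX randomisation deltas */
	return PAGE_ALIGN_M(TASK_SIZE_M - gap - rnd) - delta_mmap - delta_stack;
}

int main(void)
{
	printf("base = %#lx\n",
	       mmap_base_model(8UL << 20 /* ulimit -s 8192 */,
			       0x00200000UL, 0x01000000UL, 0x00400000UL));
	return 0;
}
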
20843 diff -urNp linux-3.0.7/arch/x86/mm/mmio-mod.c linux-3.0.7/arch/x86/mm/mmio-mod.c
20844 --- linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20845 +++ linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20846 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20847 break;
20848 default:
20849 {
20850 - unsigned char *ip = (unsigned char *)instptr;
20851 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20852 my_trace->opcode = MMIO_UNKNOWN_OP;
20853 my_trace->width = 0;
20854 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20855 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20856 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20857 void __iomem *addr)
20858 {
20859 - static atomic_t next_id;
20860 + static atomic_unchecked_t next_id;
20861 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20862 /* These are page-unaligned. */
20863 struct mmiotrace_map map = {
20864 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20865 .private = trace
20866 },
20867 .phys = offset,
20868 - .id = atomic_inc_return(&next_id)
20869 + .id = atomic_inc_return_unchecked(&next_id)
20870 };
20871 map.map_id = trace->id;
20872
20873 diff -urNp linux-3.0.7/arch/x86/mm/pageattr.c linux-3.0.7/arch/x86/mm/pageattr.c
20874 --- linux-3.0.7/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20875 +++ linux-3.0.7/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20876 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20877 */
20878 #ifdef CONFIG_PCI_BIOS
20879 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20880 - pgprot_val(forbidden) |= _PAGE_NX;
20881 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20882 #endif
20883
20884 /*
20885 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20886 * Does not cover __inittext since that is gone later on. On
20887 * 64bit we do not enforce !NX on the low mapping
20888 */
20889 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20890 - pgprot_val(forbidden) |= _PAGE_NX;
20891 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20892 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20893
20894 +#ifdef CONFIG_DEBUG_RODATA
20895 /*
20896 * The .rodata section needs to be read-only. Using the pfn
20897 * catches all aliases.
20898 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20899 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20900 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20901 pgprot_val(forbidden) |= _PAGE_RW;
20902 +#endif
20903
20904 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20905 /*
20906 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20907 }
20908 #endif
20909
20910 +#ifdef CONFIG_PAX_KERNEXEC
20911 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20912 + pgprot_val(forbidden) |= _PAGE_RW;
20913 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20914 + }
20915 +#endif
20916 +
20917 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20918
20919 return prot;
20920 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20921 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20922 {
20923 /* change init_mm */
20924 + pax_open_kernel();
20925 set_pte_atomic(kpte, pte);
20926 +
20927 #ifdef CONFIG_X86_32
20928 if (!SHARED_KERNEL_PMD) {
20929 +
20930 +#ifdef CONFIG_PAX_PER_CPU_PGD
20931 + unsigned long cpu;
20932 +#else
20933 struct page *page;
20934 +#endif
20935
20936 +#ifdef CONFIG_PAX_PER_CPU_PGD
20937 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20938 + pgd_t *pgd = get_cpu_pgd(cpu);
20939 +#else
20940 list_for_each_entry(page, &pgd_list, lru) {
20941 - pgd_t *pgd;
20942 + pgd_t *pgd = (pgd_t *)page_address(page);
20943 +#endif
20944 +
20945 pud_t *pud;
20946 pmd_t *pmd;
20947
20948 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20949 + pgd += pgd_index(address);
20950 pud = pud_offset(pgd, address);
20951 pmd = pmd_offset(pud, address);
20952 set_pte_atomic((pte_t *)pmd, pte);
20953 }
20954 }
20955 #endif
20956 + pax_close_kernel();
20957 }
20958
20959 static int
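
static_protections() above works by accumulating "forbidden" bits (RW for rodata and the KERNEXEC text range, NX for ranges that must stay executable) and then stripping them from the caller's requested protection. The sketch below reproduces only that accumulate-and-mask flow with symbolic flag values; the bit values and the range checks are placeholders, not the real pgprot encodings.

/* forbidden_mask.c: the accumulate-and-strip flow of static_protections() */
#include <stdio.h>

#define F_RW 0x2UL	/* placeholder for _PAGE_RW */
#define F_NX 0x8UL	/* placeholder for _PAGE_NX */

static int in_kernel_text(unsigned long addr) { return addr >= 0x1000 && addr < 0x2000; }
static int in_rodata(unsigned long addr)      { return addr >= 0x2000 && addr < 0x3000; }

static unsigned long apply_static_protections(unsigned long prot, unsigned long addr)
{
	unsigned long forbidden = 0;

	if (in_kernel_text(addr))
		forbidden |= F_RW | F_NX;	/* KERNEXEC: text stays read-only and executable */
	if (in_rodata(addr))
		forbidden |= F_RW;		/* rodata must stay read-only */

	return prot & ~forbidden;
}

int main(void)
{
	printf("%#lx\n", apply_static_protections(F_RW | F_NX, 0x1800));	/* 0 */
	printf("%#lx\n", apply_static_protections(F_RW, 0x2800));		/* 0 */
	printf("%#lx\n", apply_static_protections(F_RW, 0x4000));		/* 0x2 */
	return 0;
}
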
20960 diff -urNp linux-3.0.7/arch/x86/mm/pageattr-test.c linux-3.0.7/arch/x86/mm/pageattr-test.c
20961 --- linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20962 +++ linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20963 @@ -36,7 +36,7 @@ enum {
20964
20965 static int pte_testbit(pte_t pte)
20966 {
20967 - return pte_flags(pte) & _PAGE_UNUSED1;
20968 + return pte_flags(pte) & _PAGE_CPA_TEST;
20969 }
20970
20971 struct split_state {
20972 diff -urNp linux-3.0.7/arch/x86/mm/pat.c linux-3.0.7/arch/x86/mm/pat.c
20973 --- linux-3.0.7/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20974 +++ linux-3.0.7/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20975 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20976
20977 if (!entry) {
20978 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20979 - current->comm, current->pid, start, end);
20980 + current->comm, task_pid_nr(current), start, end);
20981 return -EINVAL;
20982 }
20983
20984 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20985 while (cursor < to) {
20986 if (!devmem_is_allowed(pfn)) {
20987 printk(KERN_INFO
20988 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20989 - current->comm, from, to);
20990 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20991 + current->comm, from, to, cursor);
20992 return 0;
20993 }
20994 cursor += PAGE_SIZE;
20995 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20996 printk(KERN_INFO
20997 "%s:%d ioremap_change_attr failed %s "
20998 "for %Lx-%Lx\n",
20999 - current->comm, current->pid,
21000 + current->comm, task_pid_nr(current),
21001 cattr_name(flags),
21002 base, (unsigned long long)(base + size));
21003 return -EINVAL;
21004 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21005 if (want_flags != flags) {
21006 printk(KERN_WARNING
21007 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21008 - current->comm, current->pid,
21009 + current->comm, task_pid_nr(current),
21010 cattr_name(want_flags),
21011 (unsigned long long)paddr,
21012 (unsigned long long)(paddr + size),
21013 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21014 free_memtype(paddr, paddr + size);
21015 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21016 " for %Lx-%Lx, got %s\n",
21017 - current->comm, current->pid,
21018 + current->comm, task_pid_nr(current),
21019 cattr_name(want_flags),
21020 (unsigned long long)paddr,
21021 (unsigned long long)(paddr + size),
21022 diff -urNp linux-3.0.7/arch/x86/mm/pf_in.c linux-3.0.7/arch/x86/mm/pf_in.c
21023 --- linux-3.0.7/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
21024 +++ linux-3.0.7/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
21025 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21026 int i;
21027 enum reason_type rv = OTHERS;
21028
21029 - p = (unsigned char *)ins_addr;
21030 + p = (unsigned char *)ktla_ktva(ins_addr);
21031 p += skip_prefix(p, &prf);
21032 p += get_opcode(p, &opcode);
21033
21034 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21035 struct prefix_bits prf;
21036 int i;
21037
21038 - p = (unsigned char *)ins_addr;
21039 + p = (unsigned char *)ktla_ktva(ins_addr);
21040 p += skip_prefix(p, &prf);
21041 p += get_opcode(p, &opcode);
21042
21043 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21044 struct prefix_bits prf;
21045 int i;
21046
21047 - p = (unsigned char *)ins_addr;
21048 + p = (unsigned char *)ktla_ktva(ins_addr);
21049 p += skip_prefix(p, &prf);
21050 p += get_opcode(p, &opcode);
21051
21052 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21053 struct prefix_bits prf;
21054 int i;
21055
21056 - p = (unsigned char *)ins_addr;
21057 + p = (unsigned char *)ktla_ktva(ins_addr);
21058 p += skip_prefix(p, &prf);
21059 p += get_opcode(p, &opcode);
21060 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21061 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21062 struct prefix_bits prf;
21063 int i;
21064
21065 - p = (unsigned char *)ins_addr;
21066 + p = (unsigned char *)ktla_ktva(ins_addr);
21067 p += skip_prefix(p, &prf);
21068 p += get_opcode(p, &opcode);
21069 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21070 diff -urNp linux-3.0.7/arch/x86/mm/pgtable_32.c linux-3.0.7/arch/x86/mm/pgtable_32.c
21071 --- linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21072 +++ linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21073 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21074 return;
21075 }
21076 pte = pte_offset_kernel(pmd, vaddr);
21077 +
21078 + pax_open_kernel();
21079 if (pte_val(pteval))
21080 set_pte_at(&init_mm, vaddr, pte, pteval);
21081 else
21082 pte_clear(&init_mm, vaddr, pte);
21083 + pax_close_kernel();
21084
21085 /*
21086 * It's enough to flush this one mapping.
21087 diff -urNp linux-3.0.7/arch/x86/mm/pgtable.c linux-3.0.7/arch/x86/mm/pgtable.c
21088 --- linux-3.0.7/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21089 +++ linux-3.0.7/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21090 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21091 list_del(&page->lru);
21092 }
21093
21094 -#define UNSHARED_PTRS_PER_PGD \
21095 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21096 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21097 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21098
21099 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21100 +{
21101 + while (count--)
21102 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21103 +}
21104 +#endif
21105 +
21106 +#ifdef CONFIG_PAX_PER_CPU_PGD
21107 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21108 +{
21109 + while (count--)
21110 +
21111 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21112 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21113 +#else
21114 + *dst++ = *src++;
21115 +#endif
21116
21117 +}
21118 +#endif
21119 +
21120 +#ifdef CONFIG_X86_64
21121 +#define pxd_t pud_t
21122 +#define pyd_t pgd_t
21123 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21124 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21125 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21126 +#define pyd_offset(mm, address)	pgd_offset((mm), (address))
21127 +#define PYD_SIZE PGDIR_SIZE
21128 +#else
21129 +#define pxd_t pmd_t
21130 +#define pyd_t pud_t
21131 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21132 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21133 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21134 +#define pyd_offset(mm, address)	pud_offset((mm), (address))
21135 +#define PYD_SIZE PUD_SIZE
21136 +#endif
21137 +
21138 +#ifdef CONFIG_PAX_PER_CPU_PGD
21139 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21140 +static inline void pgd_dtor(pgd_t *pgd) {}
21141 +#else
21142 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21143 {
21144 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21145 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21146 pgd_list_del(pgd);
21147 spin_unlock(&pgd_lock);
21148 }
21149 +#endif
21150
21151 /*
21152 * List of all pgd's needed for non-PAE so it can invalidate entries
21153 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21154 * -- wli
21155 */
21156
21157 -#ifdef CONFIG_X86_PAE
21158 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21159 /*
21160 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21161 * updating the top-level pagetable entries to guarantee the
21162 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21163 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21164 * and initialize the kernel pmds here.
21165 */
21166 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21167 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21168
21169 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21170 {
21171 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21172 */
21173 flush_tlb_mm(mm);
21174 }
21175 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21176 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21177 #else /* !CONFIG_X86_PAE */
21178
21179 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21180 -#define PREALLOCATED_PMDS 0
21181 +#define PREALLOCATED_PXDS 0
21182
21183 #endif /* CONFIG_X86_PAE */
21184
21185 -static void free_pmds(pmd_t *pmds[])
21186 +static void free_pxds(pxd_t *pxds[])
21187 {
21188 int i;
21189
21190 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21191 - if (pmds[i])
21192 - free_page((unsigned long)pmds[i]);
21193 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21194 + if (pxds[i])
21195 + free_page((unsigned long)pxds[i]);
21196 }
21197
21198 -static int preallocate_pmds(pmd_t *pmds[])
21199 +static int preallocate_pxds(pxd_t *pxds[])
21200 {
21201 int i;
21202 bool failed = false;
21203
21204 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21205 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21206 - if (pmd == NULL)
21207 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21208 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21209 + if (pxd == NULL)
21210 failed = true;
21211 - pmds[i] = pmd;
21212 + pxds[i] = pxd;
21213 }
21214
21215 if (failed) {
21216 - free_pmds(pmds);
21217 + free_pxds(pxds);
21218 return -ENOMEM;
21219 }
21220
21221 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21222 * preallocate which never got a corresponding vma will need to be
21223 * freed manually.
21224 */
21225 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21226 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21227 {
21228 int i;
21229
21230 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21231 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21232 pgd_t pgd = pgdp[i];
21233
21234 if (pgd_val(pgd) != 0) {
21235 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21236 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21237
21238 - pgdp[i] = native_make_pgd(0);
21239 + set_pgd(pgdp + i, native_make_pgd(0));
21240
21241 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21242 - pmd_free(mm, pmd);
21243 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21244 + pxd_free(mm, pxd);
21245 }
21246 }
21247 }
21248
21249 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21250 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21251 {
21252 - pud_t *pud;
21253 + pyd_t *pyd;
21254 unsigned long addr;
21255 int i;
21256
21257 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21258 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21259 return;
21260
21261 - pud = pud_offset(pgd, 0);
21262 +#ifdef CONFIG_X86_64
21263 + pyd = pyd_offset(mm, 0L);
21264 +#else
21265 + pyd = pyd_offset(pgd, 0L);
21266 +#endif
21267
21268 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21269 - i++, pud++, addr += PUD_SIZE) {
21270 - pmd_t *pmd = pmds[i];
21271 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21272 + i++, pyd++, addr += PYD_SIZE) {
21273 + pxd_t *pxd = pxds[i];
21274
21275 if (i >= KERNEL_PGD_BOUNDARY)
21276 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21277 - sizeof(pmd_t) * PTRS_PER_PMD);
21278 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21279 + sizeof(pxd_t) * PTRS_PER_PMD);
21280
21281 - pud_populate(mm, pud, pmd);
21282 + pyd_populate(mm, pyd, pxd);
21283 }
21284 }
21285
21286 pgd_t *pgd_alloc(struct mm_struct *mm)
21287 {
21288 pgd_t *pgd;
21289 - pmd_t *pmds[PREALLOCATED_PMDS];
21290 + pxd_t *pxds[PREALLOCATED_PXDS];
21291
21292 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21293
21294 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21295
21296 mm->pgd = pgd;
21297
21298 - if (preallocate_pmds(pmds) != 0)
21299 + if (preallocate_pxds(pxds) != 0)
21300 goto out_free_pgd;
21301
21302 if (paravirt_pgd_alloc(mm) != 0)
21303 - goto out_free_pmds;
21304 + goto out_free_pxds;
21305
21306 /*
21307 * Make sure that pre-populating the pmds is atomic with
21308 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21309 spin_lock(&pgd_lock);
21310
21311 pgd_ctor(mm, pgd);
21312 - pgd_prepopulate_pmd(mm, pgd, pmds);
21313 + pgd_prepopulate_pxd(mm, pgd, pxds);
21314
21315 spin_unlock(&pgd_lock);
21316
21317 return pgd;
21318
21319 -out_free_pmds:
21320 - free_pmds(pmds);
21321 +out_free_pxds:
21322 + free_pxds(pxds);
21323 out_free_pgd:
21324 free_page((unsigned long)pgd);
21325 out:
21326 @@ -295,7 +344,7 @@ out:
21327
21328 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21329 {
21330 - pgd_mop_up_pmds(mm, pgd);
21331 + pgd_mop_up_pxds(mm, pgd);
21332 pgd_dtor(pgd);
21333 paravirt_pgd_free(mm, pgd);
21334 free_page((unsigned long)pgd);
21335 diff -urNp linux-3.0.7/arch/x86/mm/setup_nx.c linux-3.0.7/arch/x86/mm/setup_nx.c
21336 --- linux-3.0.7/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21337 +++ linux-3.0.7/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21338 @@ -5,8 +5,10 @@
21339 #include <asm/pgtable.h>
21340 #include <asm/proto.h>
21341
21342 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21343 static int disable_nx __cpuinitdata;
21344
21345 +#ifndef CONFIG_PAX_PAGEEXEC
21346 /*
21347 * noexec = on|off
21348 *
21349 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21350 return 0;
21351 }
21352 early_param("noexec", noexec_setup);
21353 +#endif
21354 +
21355 +#endif
21356
21357 void __cpuinit x86_configure_nx(void)
21358 {
21359 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21360 if (cpu_has_nx && !disable_nx)
21361 __supported_pte_mask |= _PAGE_NX;
21362 else
21363 +#endif
21364 __supported_pte_mask &= ~_PAGE_NX;
21365 }
21366
21367 diff -urNp linux-3.0.7/arch/x86/mm/tlb.c linux-3.0.7/arch/x86/mm/tlb.c
21368 --- linux-3.0.7/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21369 +++ linux-3.0.7/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21370 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
21371 BUG();
21372 cpumask_clear_cpu(cpu,
21373 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21374 +
21375 +#ifndef CONFIG_PAX_PER_CPU_PGD
21376 load_cr3(swapper_pg_dir);
21377 +#endif
21378 +
21379 }
21380 EXPORT_SYMBOL_GPL(leave_mm);
21381
21382 diff -urNp linux-3.0.7/arch/x86/net/bpf_jit_comp.c linux-3.0.7/arch/x86/net/bpf_jit_comp.c
21383 --- linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21384 +++ linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21385 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21386 module_free(NULL, image);
21387 return;
21388 }
21389 + pax_open_kernel();
21390 memcpy(image + proglen, temp, ilen);
21391 + pax_close_kernel();
21392 }
21393 proglen += ilen;
21394 addrs[i] = proglen;
21395 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21396 break;
21397 }
21398 if (proglen == oldproglen) {
21399 - image = module_alloc(max_t(unsigned int,
21400 + image = module_alloc_exec(max_t(unsigned int,
21401 proglen,
21402 sizeof(struct work_struct)));
21403 if (!image)
21404 diff -urNp linux-3.0.7/arch/x86/net/bpf_jit.S linux-3.0.7/arch/x86/net/bpf_jit.S
21405 --- linux-3.0.7/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21406 +++ linux-3.0.7/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21407 @@ -9,6 +9,7 @@
21408 */
21409 #include <linux/linkage.h>
21410 #include <asm/dwarf2.h>
21411 +#include <asm/alternative-asm.h>
21412
21413 /*
21414 * Calling convention :
21415 @@ -35,6 +36,7 @@ sk_load_word:
21416 jle bpf_slow_path_word
21417 mov (SKBDATA,%rsi),%eax
21418 bswap %eax /* ntohl() */
21419 + pax_force_retaddr
21420 ret
21421
21422
21423 @@ -53,6 +55,7 @@ sk_load_half:
21424 jle bpf_slow_path_half
21425 movzwl (SKBDATA,%rsi),%eax
21426 rol $8,%ax # ntohs()
21427 + pax_force_retaddr
21428 ret
21429
21430 sk_load_byte_ind:
21431 @@ -66,6 +69,7 @@ sk_load_byte:
21432 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21433 jle bpf_slow_path_byte
21434 movzbl (SKBDATA,%rsi),%eax
21435 + pax_force_retaddr
21436 ret
21437
21438 /**
21439 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21440 movzbl (SKBDATA,%rsi),%ebx
21441 and $15,%bl
21442 shl $2,%bl
21443 + pax_force_retaddr
21444 ret
21445 CFI_ENDPROC
21446 ENDPROC(sk_load_byte_msh)
21447 @@ -91,6 +96,7 @@ bpf_error:
21448 xor %eax,%eax
21449 mov -8(%rbp),%rbx
21450 leaveq
21451 + pax_force_retaddr
21452 ret
21453
21454 /* rsi contains offset and can be scratched */
21455 @@ -113,6 +119,7 @@ bpf_slow_path_word:
21456 js bpf_error
21457 mov -12(%rbp),%eax
21458 bswap %eax
21459 + pax_force_retaddr
21460 ret
21461
21462 bpf_slow_path_half:
21463 @@ -121,12 +128,14 @@ bpf_slow_path_half:
21464 mov -12(%rbp),%ax
21465 rol $8,%ax
21466 movzwl %ax,%eax
21467 + pax_force_retaddr
21468 ret
21469
21470 bpf_slow_path_byte:
21471 bpf_slow_path_common(1)
21472 js bpf_error
21473 movzbl -12(%rbp),%eax
21474 + pax_force_retaddr
21475 ret
21476
21477 bpf_slow_path_byte_msh:
21478 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
21479 and $15,%al
21480 shl $2,%al
21481 xchg %eax,%ebx
21482 + pax_force_retaddr
21483 ret
21484 diff -urNp linux-3.0.7/arch/x86/oprofile/backtrace.c linux-3.0.7/arch/x86/oprofile/backtrace.c
21485 --- linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21486 +++ linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21487 @@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21488 struct stack_frame_ia32 *fp;
21489 unsigned long bytes;
21490
21491 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21492 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21493 if (bytes != sizeof(bufhead))
21494 return NULL;
21495
21496 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21497 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21498
21499 oprofile_add_trace(bufhead[0].return_address);
21500
21501 @@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21502 struct stack_frame bufhead[2];
21503 unsigned long bytes;
21504
21505 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21506 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21507 if (bytes != sizeof(bufhead))
21508 return NULL;
21509
21510 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21511 {
21512 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21513
21514 - if (!user_mode_vm(regs)) {
21515 + if (!user_mode(regs)) {
21516 unsigned long stack = kernel_stack_pointer(regs);
21517 if (depth)
21518 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21519 diff -urNp linux-3.0.7/arch/x86/pci/mrst.c linux-3.0.7/arch/x86/pci/mrst.c
21520 --- linux-3.0.7/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21521 +++ linux-3.0.7/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21522 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21523 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21524 pci_mmcfg_late_init();
21525 pcibios_enable_irq = mrst_pci_irq_enable;
21526 - pci_root_ops = pci_mrst_ops;
21527 + pax_open_kernel();
21528 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21529 + pax_close_kernel();
21530 /* Continue with standard init */
21531 return 1;
21532 }
21533 diff -urNp linux-3.0.7/arch/x86/pci/pcbios.c linux-3.0.7/arch/x86/pci/pcbios.c
21534 --- linux-3.0.7/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21535 +++ linux-3.0.7/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21536 @@ -79,50 +79,93 @@ union bios32 {
21537 static struct {
21538 unsigned long address;
21539 unsigned short segment;
21540 -} bios32_indirect = { 0, __KERNEL_CS };
21541 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21542
21543 /*
21544 * Returns the entry point for the given service, NULL on error
21545 */
21546
21547 -static unsigned long bios32_service(unsigned long service)
21548 +static unsigned long __devinit bios32_service(unsigned long service)
21549 {
21550 unsigned char return_code; /* %al */
21551 unsigned long address; /* %ebx */
21552 unsigned long length; /* %ecx */
21553 unsigned long entry; /* %edx */
21554 unsigned long flags;
21555 + struct desc_struct d, *gdt;
21556
21557 local_irq_save(flags);
21558 - __asm__("lcall *(%%edi); cld"
21559 +
21560 + gdt = get_cpu_gdt_table(smp_processor_id());
21561 +
21562 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21563 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21564 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21565 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21566 +
21567 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21568 : "=a" (return_code),
21569 "=b" (address),
21570 "=c" (length),
21571 "=d" (entry)
21572 : "0" (service),
21573 "1" (0),
21574 - "D" (&bios32_indirect));
21575 + "D" (&bios32_indirect),
21576 + "r"(__PCIBIOS_DS)
21577 + : "memory");
21578 +
21579 + pax_open_kernel();
21580 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21581 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21582 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21583 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21584 + pax_close_kernel();
21585 +
21586 local_irq_restore(flags);
21587
21588 switch (return_code) {
21589 - case 0:
21590 - return address + entry;
21591 - case 0x80: /* Not present */
21592 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21593 - return 0;
21594 - default: /* Shouldn't happen */
21595 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21596 - service, return_code);
21597 + case 0: {
21598 + int cpu;
21599 + unsigned char flags;
21600 +
21601 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21602 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21603 + printk(KERN_WARNING "bios32_service: not valid\n");
21604 return 0;
21605 + }
21606 + address = address + PAGE_OFFSET;
21607 + length += 16UL; /* some BIOSs underreport this... */
21608 + flags = 4;
21609 + if (length >= 64*1024*1024) {
21610 + length >>= PAGE_SHIFT;
21611 + flags |= 8;
21612 + }
21613 +
21614 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21615 + gdt = get_cpu_gdt_table(cpu);
21616 + pack_descriptor(&d, address, length, 0x9b, flags);
21617 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21618 + pack_descriptor(&d, address, length, 0x93, flags);
21619 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21620 + }
21621 + return entry;
21622 + }
21623 + case 0x80: /* Not present */
21624 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21625 + return 0;
21626 + default: /* Shouldn't happen */
21627 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21628 + service, return_code);
21629 + return 0;
21630 }
21631 }
21632
21633 static struct {
21634 unsigned long address;
21635 unsigned short segment;
21636 -} pci_indirect = { 0, __KERNEL_CS };
21637 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21638
21639 -static int pci_bios_present;
21640 +static int pci_bios_present __read_only;
21641
21642 static int __devinit check_pcibios(void)
21643 {
21644 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21645 unsigned long flags, pcibios_entry;
21646
21647 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21648 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21649 + pci_indirect.address = pcibios_entry;
21650
21651 local_irq_save(flags);
21652 - __asm__(
21653 - "lcall *(%%edi); cld\n\t"
21654 + __asm__("movw %w6, %%ds\n\t"
21655 + "lcall *%%ss:(%%edi); cld\n\t"
21656 + "push %%ss\n\t"
21657 + "pop %%ds\n\t"
21658 "jc 1f\n\t"
21659 "xor %%ah, %%ah\n"
21660 "1:"
21661 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21662 "=b" (ebx),
21663 "=c" (ecx)
21664 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21665 - "D" (&pci_indirect)
21666 + "D" (&pci_indirect),
21667 + "r" (__PCIBIOS_DS)
21668 : "memory");
21669 local_irq_restore(flags);
21670
21671 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21672
21673 switch (len) {
21674 case 1:
21675 - __asm__("lcall *(%%esi); cld\n\t"
21676 + __asm__("movw %w6, %%ds\n\t"
21677 + "lcall *%%ss:(%%esi); cld\n\t"
21678 + "push %%ss\n\t"
21679 + "pop %%ds\n\t"
21680 "jc 1f\n\t"
21681 "xor %%ah, %%ah\n"
21682 "1:"
21683 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21684 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21685 "b" (bx),
21686 "D" ((long)reg),
21687 - "S" (&pci_indirect));
21688 + "S" (&pci_indirect),
21689 + "r" (__PCIBIOS_DS));
21690 /*
21691 * Zero-extend the result beyond 8 bits, do not trust the
21692 * BIOS having done it:
21693 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21694 *value &= 0xff;
21695 break;
21696 case 2:
21697 - __asm__("lcall *(%%esi); cld\n\t"
21698 + __asm__("movw %w6, %%ds\n\t"
21699 + "lcall *%%ss:(%%esi); cld\n\t"
21700 + "push %%ss\n\t"
21701 + "pop %%ds\n\t"
21702 "jc 1f\n\t"
21703 "xor %%ah, %%ah\n"
21704 "1:"
21705 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21706 : "1" (PCIBIOS_READ_CONFIG_WORD),
21707 "b" (bx),
21708 "D" ((long)reg),
21709 - "S" (&pci_indirect));
21710 + "S" (&pci_indirect),
21711 + "r" (__PCIBIOS_DS));
21712 /*
21713 * Zero-extend the result beyond 16 bits, do not trust the
21714 * BIOS having done it:
21715 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21716 *value &= 0xffff;
21717 break;
21718 case 4:
21719 - __asm__("lcall *(%%esi); cld\n\t"
21720 + __asm__("movw %w6, %%ds\n\t"
21721 + "lcall *%%ss:(%%esi); cld\n\t"
21722 + "push %%ss\n\t"
21723 + "pop %%ds\n\t"
21724 "jc 1f\n\t"
21725 "xor %%ah, %%ah\n"
21726 "1:"
21727 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21728 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21729 "b" (bx),
21730 "D" ((long)reg),
21731 - "S" (&pci_indirect));
21732 + "S" (&pci_indirect),
21733 + "r" (__PCIBIOS_DS));
21734 break;
21735 }
21736
21737 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21738
21739 switch (len) {
21740 case 1:
21741 - __asm__("lcall *(%%esi); cld\n\t"
21742 + __asm__("movw %w6, %%ds\n\t"
21743 + "lcall *%%ss:(%%esi); cld\n\t"
21744 + "push %%ss\n\t"
21745 + "pop %%ds\n\t"
21746 "jc 1f\n\t"
21747 "xor %%ah, %%ah\n"
21748 "1:"
21749 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21750 "c" (value),
21751 "b" (bx),
21752 "D" ((long)reg),
21753 - "S" (&pci_indirect));
21754 + "S" (&pci_indirect),
21755 + "r" (__PCIBIOS_DS));
21756 break;
21757 case 2:
21758 - __asm__("lcall *(%%esi); cld\n\t"
21759 + __asm__("movw %w6, %%ds\n\t"
21760 + "lcall *%%ss:(%%esi); cld\n\t"
21761 + "push %%ss\n\t"
21762 + "pop %%ds\n\t"
21763 "jc 1f\n\t"
21764 "xor %%ah, %%ah\n"
21765 "1:"
21766 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21767 "c" (value),
21768 "b" (bx),
21769 "D" ((long)reg),
21770 - "S" (&pci_indirect));
21771 + "S" (&pci_indirect),
21772 + "r" (__PCIBIOS_DS));
21773 break;
21774 case 4:
21775 - __asm__("lcall *(%%esi); cld\n\t"
21776 + __asm__("movw %w6, %%ds\n\t"
21777 + "lcall *%%ss:(%%esi); cld\n\t"
21778 + "push %%ss\n\t"
21779 + "pop %%ds\n\t"
21780 "jc 1f\n\t"
21781 "xor %%ah, %%ah\n"
21782 "1:"
21783 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21784 "c" (value),
21785 "b" (bx),
21786 "D" ((long)reg),
21787 - "S" (&pci_indirect));
21788 + "S" (&pci_indirect),
21789 + "r" (__PCIBIOS_DS));
21790 break;
21791 }
21792
21793 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21794
21795 DBG("PCI: Fetching IRQ routing table... ");
21796 __asm__("push %%es\n\t"
21797 + "movw %w8, %%ds\n\t"
21798 "push %%ds\n\t"
21799 "pop %%es\n\t"
21800 - "lcall *(%%esi); cld\n\t"
21801 + "lcall *%%ss:(%%esi); cld\n\t"
21802 "pop %%es\n\t"
21803 + "push %%ss\n\t"
21804 + "pop %%ds\n"
21805 "jc 1f\n\t"
21806 "xor %%ah, %%ah\n"
21807 "1:"
21808 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21809 "1" (0),
21810 "D" ((long) &opt),
21811 "S" (&pci_indirect),
21812 - "m" (opt)
21813 + "m" (opt),
21814 + "r" (__PCIBIOS_DS)
21815 : "memory");
21816 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21817 if (ret & 0xff00)
21818 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21819 {
21820 int ret;
21821
21822 - __asm__("lcall *(%%esi); cld\n\t"
21823 + __asm__("movw %w5, %%ds\n\t"
21824 + "lcall *%%ss:(%%esi); cld\n\t"
21825 + "push %%ss\n\t"
21826 + "pop %%ds\n"
21827 "jc 1f\n\t"
21828 "xor %%ah, %%ah\n"
21829 "1:"
21830 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21831 : "0" (PCIBIOS_SET_PCI_HW_INT),
21832 "b" ((dev->bus->number << 8) | dev->devfn),
21833 "c" ((irq << 8) | (pin + 10)),
21834 - "S" (&pci_indirect));
21835 + "S" (&pci_indirect),
21836 + "r" (__PCIBIOS_DS));
21837 return !(ret & 0xff00);
21838 }
21839 EXPORT_SYMBOL(pcibios_set_irq_routing);
21840 diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_32.c linux-3.0.7/arch/x86/platform/efi/efi_32.c
21841 --- linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21842 +++ linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
21843 @@ -38,70 +38,56 @@
21844 */
21845
21846 static unsigned long efi_rt_eflags;
21847 -static pgd_t efi_bak_pg_dir_pointer[2];
21848 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21849
21850 -void efi_call_phys_prelog(void)
21851 +void __init efi_call_phys_prelog(void)
21852 {
21853 - unsigned long cr4;
21854 - unsigned long temp;
21855 struct desc_ptr gdt_descr;
21856
21857 - local_irq_save(efi_rt_eflags);
21858 +#ifdef CONFIG_PAX_KERNEXEC
21859 + struct desc_struct d;
21860 +#endif
21861
21862 - /*
21863 - * If I don't have PAE, I should just duplicate two entries in page
21864 - * directory. If I have PAE, I just need to duplicate one entry in
21865 - * page directory.
21866 - */
21867 - cr4 = read_cr4_safe();
21868 + local_irq_save(efi_rt_eflags);
21869
21870 - if (cr4 & X86_CR4_PAE) {
21871 - efi_bak_pg_dir_pointer[0].pgd =
21872 - swapper_pg_dir[pgd_index(0)].pgd;
21873 - swapper_pg_dir[0].pgd =
21874 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21875 - } else {
21876 - efi_bak_pg_dir_pointer[0].pgd =
21877 - swapper_pg_dir[pgd_index(0)].pgd;
21878 - efi_bak_pg_dir_pointer[1].pgd =
21879 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21880 - swapper_pg_dir[pgd_index(0)].pgd =
21881 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21882 - temp = PAGE_OFFSET + 0x400000;
21883 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21884 - swapper_pg_dir[pgd_index(temp)].pgd;
21885 - }
21886 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21887 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21888 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21889
21890 /*
21891 * After the lock is released, the original page table is restored.
21892 */
21893 __flush_tlb_all();
21894
21895 +#ifdef CONFIG_PAX_KERNEXEC
21896 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21897 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21898 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21899 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21900 +#endif
21901 +
21902 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21903 gdt_descr.size = GDT_SIZE - 1;
21904 load_gdt(&gdt_descr);
21905 }
21906
21907 -void efi_call_phys_epilog(void)
21908 +void __init efi_call_phys_epilog(void)
21909 {
21910 - unsigned long cr4;
21911 struct desc_ptr gdt_descr;
21912
21913 +#ifdef CONFIG_PAX_KERNEXEC
21914 + struct desc_struct d;
21915 +
21916 + memset(&d, 0, sizeof d);
21917 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21918 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21919 +#endif
21920 +
21921 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21922 gdt_descr.size = GDT_SIZE - 1;
21923 load_gdt(&gdt_descr);
21924
21925 - cr4 = read_cr4_safe();
21926 -
21927 - if (cr4 & X86_CR4_PAE) {
21928 - swapper_pg_dir[pgd_index(0)].pgd =
21929 - efi_bak_pg_dir_pointer[0].pgd;
21930 - } else {
21931 - swapper_pg_dir[pgd_index(0)].pgd =
21932 - efi_bak_pg_dir_pointer[0].pgd;
21933 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21934 - efi_bak_pg_dir_pointer[1].pgd;
21935 - }
21936 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21937
21938 /*
21939 * After the lock is released, the original page table is restored.
21940 diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S
21941 --- linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21942 +++ linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21943 @@ -6,7 +6,9 @@
21944 */
21945
21946 #include <linux/linkage.h>
21947 +#include <linux/init.h>
21948 #include <asm/page_types.h>
21949 +#include <asm/segment.h>
21950
21951 /*
21952 * efi_call_phys(void *, ...) is a function with variable parameters.
21953 @@ -20,7 +22,7 @@
21954 * service functions will comply with gcc calling convention, too.
21955 */
21956
21957 -.text
21958 +__INIT
21959 ENTRY(efi_call_phys)
21960 /*
21961 * 0. The function can only be called in Linux kernel. So CS has been
21962 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21963 * The mapping of lower virtual memory has been created in prelog and
21964 * epilog.
21965 */
21966 - movl $1f, %edx
21967 - subl $__PAGE_OFFSET, %edx
21968 - jmp *%edx
21969 + movl $(__KERNEXEC_EFI_DS), %edx
21970 + mov %edx, %ds
21971 + mov %edx, %es
21972 + mov %edx, %ss
21973 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21974 1:
21975
21976 /*
21977 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21978 * parameter 2, ..., param n. To make things easy, we save the return
21979 * address of efi_call_phys in a global variable.
21980 */
21981 - popl %edx
21982 - movl %edx, saved_return_addr
21983 - /* get the function pointer into ECX*/
21984 - popl %ecx
21985 - movl %ecx, efi_rt_function_ptr
21986 - movl $2f, %edx
21987 - subl $__PAGE_OFFSET, %edx
21988 - pushl %edx
21989 + popl (saved_return_addr)
21990 + popl (efi_rt_function_ptr)
21991
21992 /*
21993 * 3. Clear PG bit in %CR0.
21994 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21995 /*
21996 * 5. Call the physical function.
21997 */
21998 - jmp *%ecx
21999 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
22000
22001 -2:
22002 /*
22003 * 6. After EFI runtime service returns, control will return to
22004 * following instruction. We'd better readjust stack pointer first.
22005 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22006 movl %cr0, %edx
22007 orl $0x80000000, %edx
22008 movl %edx, %cr0
22009 - jmp 1f
22010 -1:
22011 +
22012 /*
22013 * 8. Now restore the virtual mode from flat mode by
22014 * adding EIP with PAGE_OFFSET.
22015 */
22016 - movl $1f, %edx
22017 - jmp *%edx
22018 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22019 1:
22020 + movl $(__KERNEL_DS), %edx
22021 + mov %edx, %ds
22022 + mov %edx, %es
22023 + mov %edx, %ss
22024
22025 /*
22026 * 9. Balance the stack. And because EAX contain the return value,
22027 * we'd better not clobber it.
22028 */
22029 - leal efi_rt_function_ptr, %edx
22030 - movl (%edx), %ecx
22031 - pushl %ecx
22032 + pushl (efi_rt_function_ptr)
22033
22034 /*
22035 - * 10. Push the saved return address onto the stack and return.
22036 + * 10. Return to the saved return address.
22037 */
22038 - leal saved_return_addr, %edx
22039 - movl (%edx), %ecx
22040 - pushl %ecx
22041 - ret
22042 + jmpl *(saved_return_addr)
22043 ENDPROC(efi_call_phys)
22044 .previous
22045
22046 -.data
22047 +__INITDATA
22048 saved_return_addr:
22049 .long 0
22050 efi_rt_function_ptr:
22051 diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S
22052 --- linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22053 +++ linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22054 @@ -7,6 +7,7 @@
22055 */
22056
22057 #include <linux/linkage.h>
22058 +#include <asm/alternative-asm.h>
22059
22060 #define SAVE_XMM \
22061 mov %rsp, %rax; \
22062 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
22063 call *%rdi
22064 addq $32, %rsp
22065 RESTORE_XMM
22066 + pax_force_retaddr
22067 ret
22068 ENDPROC(efi_call0)
22069
22070 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
22071 call *%rdi
22072 addq $32, %rsp
22073 RESTORE_XMM
22074 + pax_force_retaddr
22075 ret
22076 ENDPROC(efi_call1)
22077
22078 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
22079 call *%rdi
22080 addq $32, %rsp
22081 RESTORE_XMM
22082 + pax_force_retaddr
22083 ret
22084 ENDPROC(efi_call2)
22085
22086 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
22087 call *%rdi
22088 addq $32, %rsp
22089 RESTORE_XMM
22090 + pax_force_retaddr
22091 ret
22092 ENDPROC(efi_call3)
22093
22094 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
22095 call *%rdi
22096 addq $32, %rsp
22097 RESTORE_XMM
22098 + pax_force_retaddr
22099 ret
22100 ENDPROC(efi_call4)
22101
22102 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
22103 call *%rdi
22104 addq $48, %rsp
22105 RESTORE_XMM
22106 + pax_force_retaddr
22107 ret
22108 ENDPROC(efi_call5)
22109
22110 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
22111 call *%rdi
22112 addq $48, %rsp
22113 RESTORE_XMM
22114 + pax_force_retaddr
22115 ret
22116 ENDPROC(efi_call6)
22117 diff -urNp linux-3.0.7/arch/x86/platform/mrst/mrst.c linux-3.0.7/arch/x86/platform/mrst/mrst.c
22118 --- linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22119 +++ linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22120 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22121 }
22122
22123 /* Reboot and power off are handled by the SCU on a MID device */
22124 -static void mrst_power_off(void)
22125 +static __noreturn void mrst_power_off(void)
22126 {
22127 intel_scu_ipc_simple_command(0xf1, 1);
22128 + BUG();
22129 }
22130
22131 -static void mrst_reboot(void)
22132 +static __noreturn void mrst_reboot(void)
22133 {
22134 intel_scu_ipc_simple_command(0xf1, 0);
22135 + BUG();
22136 }
22137
22138 /*
22139 diff -urNp linux-3.0.7/arch/x86/platform/uv/tlb_uv.c linux-3.0.7/arch/x86/platform/uv/tlb_uv.c
22140 --- linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22141 +++ linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22142 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22143 cpumask_t mask;
22144 struct reset_args reset_args;
22145
22146 + pax_track_stack();
22147 +
22148 reset_args.sender = sender;
22149 cpus_clear(mask);
22150 /* find a single cpu for each uvhub in this distribution mask */
22151 diff -urNp linux-3.0.7/arch/x86/power/cpu.c linux-3.0.7/arch/x86/power/cpu.c
22152 --- linux-3.0.7/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22153 +++ linux-3.0.7/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22154 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
22155 static void fix_processor_context(void)
22156 {
22157 int cpu = smp_processor_id();
22158 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22159 + struct tss_struct *t = init_tss + cpu;
22160
22161 set_tss_desc(cpu, t); /*
22162 * This just modifies memory; should not be
22163 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
22164 */
22165
22166 #ifdef CONFIG_X86_64
22167 + pax_open_kernel();
22168 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22169 + pax_close_kernel();
22170
22171 syscall_init(); /* This sets MSR_*STAR and related */
22172 #endif
22173 diff -urNp linux-3.0.7/arch/x86/vdso/Makefile linux-3.0.7/arch/x86/vdso/Makefile
22174 --- linux-3.0.7/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22175 +++ linux-3.0.7/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22176 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22177 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22178 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22179
22180 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22181 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22182 GCOV_PROFILE := n
22183
22184 #
22185 diff -urNp linux-3.0.7/arch/x86/vdso/vdso32-setup.c linux-3.0.7/arch/x86/vdso/vdso32-setup.c
22186 --- linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22187 +++ linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22188 @@ -25,6 +25,7 @@
22189 #include <asm/tlbflush.h>
22190 #include <asm/vdso.h>
22191 #include <asm/proto.h>
22192 +#include <asm/mman.h>
22193
22194 enum {
22195 VDSO_DISABLED = 0,
22196 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22197 void enable_sep_cpu(void)
22198 {
22199 int cpu = get_cpu();
22200 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22201 + struct tss_struct *tss = init_tss + cpu;
22202
22203 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22204 put_cpu();
22205 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22206 gate_vma.vm_start = FIXADDR_USER_START;
22207 gate_vma.vm_end = FIXADDR_USER_END;
22208 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22209 - gate_vma.vm_page_prot = __P101;
22210 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22211 /*
22212 * Make sure the vDSO gets into every core dump.
22213 * Dumping its contents makes post-mortem fully interpretable later
22214 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22215 if (compat)
22216 addr = VDSO_HIGH_BASE;
22217 else {
22218 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22219 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22220 if (IS_ERR_VALUE(addr)) {
22221 ret = addr;
22222 goto up_fail;
22223 }
22224 }
22225
22226 - current->mm->context.vdso = (void *)addr;
22227 + current->mm->context.vdso = addr;
22228
22229 if (compat_uses_vma || !compat) {
22230 /*
22231 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22232 }
22233
22234 current_thread_info()->sysenter_return =
22235 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22236 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22237
22238 up_fail:
22239 if (ret)
22240 - current->mm->context.vdso = NULL;
22241 + current->mm->context.vdso = 0;
22242
22243 up_write(&mm->mmap_sem);
22244
22245 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22246
22247 const char *arch_vma_name(struct vm_area_struct *vma)
22248 {
22249 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22250 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22251 return "[vdso]";
22252 +
22253 +#ifdef CONFIG_PAX_SEGMEXEC
22254 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22255 + return "[vdso]";
22256 +#endif
22257 +
22258 return NULL;
22259 }
22260
22261 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22262 * Check to see if the corresponding task was created in compat vdso
22263 * mode.
22264 */
22265 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22266 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22267 return &gate_vma;
22268 return NULL;
22269 }
22270 diff -urNp linux-3.0.7/arch/x86/vdso/vma.c linux-3.0.7/arch/x86/vdso/vma.c
22271 --- linux-3.0.7/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22272 +++ linux-3.0.7/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22273 @@ -15,18 +15,19 @@
22274 #include <asm/proto.h>
22275 #include <asm/vdso.h>
22276
22277 -unsigned int __read_mostly vdso_enabled = 1;
22278 -
22279 extern char vdso_start[], vdso_end[];
22280 extern unsigned short vdso_sync_cpuid;
22281 +extern char __vsyscall_0;
22282
22283 static struct page **vdso_pages;
22284 +static struct page *vsyscall_page;
22285 static unsigned vdso_size;
22286
22287 static int __init init_vdso_vars(void)
22288 {
22289 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22290 - int i;
22291 + size_t nbytes = vdso_end - vdso_start;
22292 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22293 + size_t i;
22294
22295 vdso_size = npages << PAGE_SHIFT;
22296 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22297 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22298 goto oom;
22299 for (i = 0; i < npages; i++) {
22300 struct page *p;
22301 - p = alloc_page(GFP_KERNEL);
22302 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22303 if (!p)
22304 goto oom;
22305 vdso_pages[i] = p;
22306 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22307 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22308 + nbytes -= PAGE_SIZE;
22309 }
22310 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22311
22312 return 0;
22313
22314 oom:
22315 - printk("Cannot allocate vdso\n");
22316 - vdso_enabled = 0;
22317 - return -ENOMEM;
22318 + panic("Cannot allocate vdso\n");
22319 }
22320 subsys_initcall(init_vdso_vars);
22321
22322 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22323 unsigned long addr;
22324 int ret;
22325
22326 - if (!vdso_enabled)
22327 - return 0;
22328 -
22329 down_write(&mm->mmap_sem);
22330 - addr = vdso_addr(mm->start_stack, vdso_size);
22331 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22332 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22333 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22334 if (IS_ERR_VALUE(addr)) {
22335 ret = addr;
22336 goto up_fail;
22337 }
22338
22339 - current->mm->context.vdso = (void *)addr;
22340 + mm->context.vdso = addr + PAGE_SIZE;
22341
22342 - ret = install_special_mapping(mm, addr, vdso_size,
22343 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
22344 VM_READ|VM_EXEC|
22345 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22346 + VM_MAYREAD|VM_MAYEXEC|
22347 VM_ALWAYSDUMP,
22348 - vdso_pages);
22349 + &vsyscall_page);
22350 if (ret) {
22351 - current->mm->context.vdso = NULL;
22352 + mm->context.vdso = 0;
22353 goto up_fail;
22354 }
22355
22356 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22357 + VM_READ|VM_EXEC|
22358 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22359 + VM_ALWAYSDUMP,
22360 + vdso_pages);
22361 + if (ret)
22362 + mm->context.vdso = 0;
22363 +
22364 up_fail:
22365 up_write(&mm->mmap_sem);
22366 return ret;
22367 }
22368 -
22369 -static __init int vdso_setup(char *s)
22370 -{
22371 - vdso_enabled = simple_strtoul(s, NULL, 0);
22372 - return 0;
22373 -}
22374 -__setup("vdso=", vdso_setup);
22375 diff -urNp linux-3.0.7/arch/x86/xen/enlighten.c linux-3.0.7/arch/x86/xen/enlighten.c
22376 --- linux-3.0.7/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22377 +++ linux-3.0.7/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22378 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22379
22380 struct shared_info xen_dummy_shared_info;
22381
22382 -void *xen_initial_gdt;
22383 -
22384 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22385 __read_mostly int xen_have_vector_callback;
22386 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22387 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22388 #endif
22389 };
22390
22391 -static void xen_reboot(int reason)
22392 +static __noreturn void xen_reboot(int reason)
22393 {
22394 struct sched_shutdown r = { .reason = reason };
22395
22396 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22397 BUG();
22398 }
22399
22400 -static void xen_restart(char *msg)
22401 +static __noreturn void xen_restart(char *msg)
22402 {
22403 xen_reboot(SHUTDOWN_reboot);
22404 }
22405
22406 -static void xen_emergency_restart(void)
22407 +static __noreturn void xen_emergency_restart(void)
22408 {
22409 xen_reboot(SHUTDOWN_reboot);
22410 }
22411
22412 -static void xen_machine_halt(void)
22413 +static __noreturn void xen_machine_halt(void)
22414 {
22415 xen_reboot(SHUTDOWN_poweroff);
22416 }
22417 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22418 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22419
22420 /* Work out if we support NX */
22421 - x86_configure_nx();
22422 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22423 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22424 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22425 + unsigned l, h;
22426 +
22427 + __supported_pte_mask |= _PAGE_NX;
22428 + rdmsr(MSR_EFER, l, h);
22429 + l |= EFER_NX;
22430 + wrmsr(MSR_EFER, l, h);
22431 + }
22432 +#endif
22433
22434 xen_setup_features();
22435
22436 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22437
22438 machine_ops = xen_machine_ops;
22439
22440 - /*
22441 - * The only reliable way to retain the initial address of the
22442 - * percpu gdt_page is to remember it here, so we can go and
22443 - * mark it RW later, when the initial percpu area is freed.
22444 - */
22445 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22446 -
22447 xen_smp_init();
22448
22449 #ifdef CONFIG_ACPI_NUMA
22450 diff -urNp linux-3.0.7/arch/x86/xen/mmu.c linux-3.0.7/arch/x86/xen/mmu.c
22451 --- linux-3.0.7/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22452 +++ linux-3.0.7/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22453 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22454 convert_pfn_mfn(init_level4_pgt);
22455 convert_pfn_mfn(level3_ident_pgt);
22456 convert_pfn_mfn(level3_kernel_pgt);
22457 + convert_pfn_mfn(level3_vmalloc_pgt);
22458 + convert_pfn_mfn(level3_vmemmap_pgt);
22459
22460 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22461 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22462 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22463 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22464 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22465 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22466 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22467 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22468 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22469 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22470 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22471 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22472
22473 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22474 pv_mmu_ops.set_pud = xen_set_pud;
22475 #if PAGETABLE_LEVELS == 4
22476 pv_mmu_ops.set_pgd = xen_set_pgd;
22477 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22478 #endif
22479
22480 /* This will work as long as patching hasn't happened yet
22481 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22482 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22483 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22484 .set_pgd = xen_set_pgd_hyper,
22485 + .set_pgd_batched = xen_set_pgd_hyper,
22486
22487 .alloc_pud = xen_alloc_pmd_init,
22488 .release_pud = xen_release_pmd_init,
22489 diff -urNp linux-3.0.7/arch/x86/xen/smp.c linux-3.0.7/arch/x86/xen/smp.c
22490 --- linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:54:53.000000000 -0400
22491 +++ linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:55:27.000000000 -0400
22492 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
22493 {
22494 BUG_ON(smp_processor_id() != 0);
22495 native_smp_prepare_boot_cpu();
22496 -
22497 - /* We've switched to the "real" per-cpu gdt, so make sure the
22498 - old memory can be recycled */
22499 - make_lowmem_page_readwrite(xen_initial_gdt);
22500 -
22501 xen_filter_cpu_maps();
22502 xen_setup_vcpu_info_placement();
22503 }
22504 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
22505 gdt = get_cpu_gdt_table(cpu);
22506
22507 ctxt->flags = VGCF_IN_KERNEL;
22508 - ctxt->user_regs.ds = __USER_DS;
22509 - ctxt->user_regs.es = __USER_DS;
22510 + ctxt->user_regs.ds = __KERNEL_DS;
22511 + ctxt->user_regs.es = __KERNEL_DS;
22512 ctxt->user_regs.ss = __KERNEL_DS;
22513 #ifdef CONFIG_X86_32
22514 ctxt->user_regs.fs = __KERNEL_PERCPU;
22515 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22516 + savesegment(gs, ctxt->user_regs.gs);
22517 #else
22518 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22519 #endif
22520 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
22521 int rc;
22522
22523 per_cpu(current_task, cpu) = idle;
22524 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22525 #ifdef CONFIG_X86_32
22526 irq_ctx_init(cpu);
22527 #else
22528 clear_tsk_thread_flag(idle, TIF_FORK);
22529 - per_cpu(kernel_stack, cpu) =
22530 - (unsigned long)task_stack_page(idle) -
22531 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22532 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22533 #endif
22534 xen_setup_runstate_info(cpu);
22535 xen_setup_timer(cpu);
22536 diff -urNp linux-3.0.7/arch/x86/xen/xen-asm_32.S linux-3.0.7/arch/x86/xen/xen-asm_32.S
22537 --- linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:54:53.000000000 -0400
22538 +++ linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:55:27.000000000 -0400
22539 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22540 ESP_OFFSET=4 # bytes pushed onto stack
22541
22542 /*
22543 - * Store vcpu_info pointer for easy access. Do it this way to
22544 - * avoid having to reload %fs
22545 + * Store vcpu_info pointer for easy access.
22546 */
22547 #ifdef CONFIG_SMP
22548 - GET_THREAD_INFO(%eax)
22549 - movl TI_cpu(%eax), %eax
22550 - movl __per_cpu_offset(,%eax,4), %eax
22551 - mov xen_vcpu(%eax), %eax
22552 + push %fs
22553 + mov $(__KERNEL_PERCPU), %eax
22554 + mov %eax, %fs
22555 + mov PER_CPU_VAR(xen_vcpu), %eax
22556 + pop %fs
22557 #else
22558 movl xen_vcpu, %eax
22559 #endif
22560 diff -urNp linux-3.0.7/arch/x86/xen/xen-head.S linux-3.0.7/arch/x86/xen/xen-head.S
22561 --- linux-3.0.7/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22562 +++ linux-3.0.7/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22563 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22564 #ifdef CONFIG_X86_32
22565 mov %esi,xen_start_info
22566 mov $init_thread_union+THREAD_SIZE,%esp
22567 +#ifdef CONFIG_SMP
22568 + movl $cpu_gdt_table,%edi
22569 + movl $__per_cpu_load,%eax
22570 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22571 + rorl $16,%eax
22572 + movb %al,__KERNEL_PERCPU + 4(%edi)
22573 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22574 + movl $__per_cpu_end - 1,%eax
22575 + subl $__per_cpu_start,%eax
22576 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22577 +#endif
22578 #else
22579 mov %rsi,xen_start_info
22580 mov $init_thread_union+THREAD_SIZE,%rsp
22581 diff -urNp linux-3.0.7/arch/x86/xen/xen-ops.h linux-3.0.7/arch/x86/xen/xen-ops.h
22582 --- linux-3.0.7/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22583 +++ linux-3.0.7/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22584 @@ -10,8 +10,6 @@
22585 extern const char xen_hypervisor_callback[];
22586 extern const char xen_failsafe_callback[];
22587
22588 -extern void *xen_initial_gdt;
22589 -
22590 struct trap_info;
22591 void xen_copy_trap_info(struct trap_info *traps);
22592
22593 diff -urNp linux-3.0.7/block/blk-iopoll.c linux-3.0.7/block/blk-iopoll.c
22594 --- linux-3.0.7/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22595 +++ linux-3.0.7/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22596 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22597 }
22598 EXPORT_SYMBOL(blk_iopoll_complete);
22599
22600 -static void blk_iopoll_softirq(struct softirq_action *h)
22601 +static void blk_iopoll_softirq(void)
22602 {
22603 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22604 int rearm = 0, budget = blk_iopoll_budget;
22605 diff -urNp linux-3.0.7/block/blk-map.c linux-3.0.7/block/blk-map.c
22606 --- linux-3.0.7/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22607 +++ linux-3.0.7/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22608 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22609 if (!len || !kbuf)
22610 return -EINVAL;
22611
22612 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22613 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22614 if (do_copy)
22615 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22616 else
22617 diff -urNp linux-3.0.7/block/blk-softirq.c linux-3.0.7/block/blk-softirq.c
22618 --- linux-3.0.7/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22619 +++ linux-3.0.7/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22620 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22621 * Softirq action handler - move entries to local list and loop over them
22622 * while passing them to the queue registered handler.
22623 */
22624 -static void blk_done_softirq(struct softirq_action *h)
22625 +static void blk_done_softirq(void)
22626 {
22627 struct list_head *cpu_list, local_list;
22628
22629 diff -urNp linux-3.0.7/block/bsg.c linux-3.0.7/block/bsg.c
22630 --- linux-3.0.7/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22631 +++ linux-3.0.7/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22632 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22633 struct sg_io_v4 *hdr, struct bsg_device *bd,
22634 fmode_t has_write_perm)
22635 {
22636 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22637 + unsigned char *cmdptr;
22638 +
22639 if (hdr->request_len > BLK_MAX_CDB) {
22640 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22641 if (!rq->cmd)
22642 return -ENOMEM;
22643 - }
22644 + cmdptr = rq->cmd;
22645 + } else
22646 + cmdptr = tmpcmd;
22647
22648 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22649 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22650 hdr->request_len))
22651 return -EFAULT;
22652
22653 + if (cmdptr != rq->cmd)
22654 + memcpy(rq->cmd, cmdptr, hdr->request_len);
22655 +
22656 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22657 if (blk_verify_command(rq->cmd, has_write_perm))
22658 return -EPERM;
22659 @@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22660 struct request *rq, *next_rq = NULL;
22661 int ret, rw;
22662 unsigned int dxfer_len;
22663 - void *dxferp = NULL;
22664 + void __user *dxferp = NULL;
22665 struct bsg_class_device *bcd = &q->bsg_dev;
22666
22667 /* if the LLD has been removed then the bsg_unregister_queue will
22668 @@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22669 rq->next_rq = next_rq;
22670 next_rq->cmd_type = rq->cmd_type;
22671
22672 - dxferp = (void*)(unsigned long)hdr->din_xferp;
22673 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22674 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22675 hdr->din_xfer_len, GFP_KERNEL);
22676 if (ret)
22677 @@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
22678
22679 if (hdr->dout_xfer_len) {
22680 dxfer_len = hdr->dout_xfer_len;
22681 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
22682 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
22683 } else if (hdr->din_xfer_len) {
22684 dxfer_len = hdr->din_xfer_len;
22685 - dxferp = (void*)(unsigned long)hdr->din_xferp;
22686 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22687 } else
22688 dxfer_len = 0;
22689
22690 @@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
22691 int len = min_t(unsigned int, hdr->max_response_len,
22692 rq->sense_len);
22693
22694 - ret = copy_to_user((void*)(unsigned long)hdr->response,
22695 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
22696 rq->sense, len);
22697 if (!ret)
22698 hdr->response_len = len;
22699 diff -urNp linux-3.0.7/block/compat_ioctl.c linux-3.0.7/block/compat_ioctl.c
22700 --- linux-3.0.7/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22701 +++ linux-3.0.7/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
22702 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
22703 err |= __get_user(f->spec1, &uf->spec1);
22704 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
22705 err |= __get_user(name, &uf->name);
22706 - f->name = compat_ptr(name);
22707 + f->name = (void __force_kernel *)compat_ptr(name);
22708 if (err) {
22709 err = -EFAULT;
22710 goto out;
22711 diff -urNp linux-3.0.7/block/scsi_ioctl.c linux-3.0.7/block/scsi_ioctl.c
22712 --- linux-3.0.7/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22713 +++ linux-3.0.7/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22714 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22715 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22716 struct sg_io_hdr *hdr, fmode_t mode)
22717 {
22718 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22719 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22720 + unsigned char *cmdptr;
22721 +
22722 + if (rq->cmd != rq->__cmd)
22723 + cmdptr = rq->cmd;
22724 + else
22725 + cmdptr = tmpcmd;
22726 +
22727 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22728 return -EFAULT;
22729 +
22730 + if (cmdptr != rq->cmd)
22731 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22732 +
22733 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22734 return -EPERM;
22735
22736 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22737 int err;
22738 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22739 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22740 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22741 + unsigned char *cmdptr;
22742
22743 if (!sic)
22744 return -EINVAL;
22745 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22746 */
22747 err = -EFAULT;
22748 rq->cmd_len = cmdlen;
22749 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
22750 +
22751 + if (rq->cmd != rq->__cmd)
22752 + cmdptr = rq->cmd;
22753 + else
22754 + cmdptr = tmpcmd;
22755 +
22756 + if (copy_from_user(cmdptr, sic->data, cmdlen))
22757 goto error;
22758
22759 + if (rq->cmd != cmdptr)
22760 + memcpy(rq->cmd, cmdptr, cmdlen);
22761 +
22762 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22763 goto error;
22764
22765 diff -urNp linux-3.0.7/crypto/cryptd.c linux-3.0.7/crypto/cryptd.c
22766 --- linux-3.0.7/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22767 +++ linux-3.0.7/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22768 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22769
22770 struct cryptd_blkcipher_request_ctx {
22771 crypto_completion_t complete;
22772 -};
22773 +} __no_const;
22774
22775 struct cryptd_hash_ctx {
22776 struct crypto_shash *child;
22777 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22778
22779 struct cryptd_aead_request_ctx {
22780 crypto_completion_t complete;
22781 -};
22782 +} __no_const;
22783
22784 static void cryptd_queue_worker(struct work_struct *work);
22785
22786 diff -urNp linux-3.0.7/crypto/gf128mul.c linux-3.0.7/crypto/gf128mul.c
22787 --- linux-3.0.7/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22788 +++ linux-3.0.7/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22789 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22790 for (i = 0; i < 7; ++i)
22791 gf128mul_x_lle(&p[i + 1], &p[i]);
22792
22793 - memset(r, 0, sizeof(r));
22794 + memset(r, 0, sizeof(*r));
22795 for (i = 0;;) {
22796 u8 ch = ((u8 *)b)[15 - i];
22797
22798 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22799 for (i = 0; i < 7; ++i)
22800 gf128mul_x_bbe(&p[i + 1], &p[i]);
22801
22802 - memset(r, 0, sizeof(r));
22803 + memset(r, 0, sizeof(*r));
22804 for (i = 0;;) {
22805 u8 ch = ((u8 *)b)[i];
22806
22807 diff -urNp linux-3.0.7/crypto/serpent.c linux-3.0.7/crypto/serpent.c
22808 --- linux-3.0.7/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22809 +++ linux-3.0.7/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22810 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22811 u32 r0,r1,r2,r3,r4;
22812 int i;
22813
22814 + pax_track_stack();
22815 +
22816 /* Copy key, add padding */
22817
22818 for (i = 0; i < keylen; ++i)
22819 diff -urNp linux-3.0.7/Documentation/dontdiff linux-3.0.7/Documentation/dontdiff
22820 --- linux-3.0.7/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22821 +++ linux-3.0.7/Documentation/dontdiff 2011-10-07 19:07:23.000000000 -0400
22822 @@ -5,6 +5,7 @@
22823 *.cis
22824 *.cpio
22825 *.csp
22826 +*.dbg
22827 *.dsp
22828 *.dvi
22829 *.elf
22830 @@ -48,9 +49,11 @@
22831 *.tab.h
22832 *.tex
22833 *.ver
22834 +*.vim
22835 *.xml
22836 *.xz
22837 *_MODULES
22838 +*_reg_safe.h
22839 *_vga16.c
22840 *~
22841 \#*#
22842 @@ -70,6 +73,7 @@ Kerntypes
22843 Module.markers
22844 Module.symvers
22845 PENDING
22846 +PERF*
22847 SCCS
22848 System.map*
22849 TAGS
22850 @@ -98,6 +102,8 @@ bzImage*
22851 capability_names.h
22852 capflags.c
22853 classlist.h*
22854 +clut_vga16.c
22855 +common-cmds.h
22856 comp*.log
22857 compile.h*
22858 conf
22859 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22860 gconf
22861 gconf.glade.h
22862 gen-devlist
22863 +gen-kdb_cmds.c
22864 gen_crc32table
22865 gen_init_cpio
22866 generated
22867 genheaders
22868 genksyms
22869 *_gray256.c
22870 +hash
22871 hpet_example
22872 hugepage-mmap
22873 hugepage-shm
22874 @@ -146,7 +154,6 @@ int32.c
22875 int4.c
22876 int8.c
22877 kallsyms
22878 -kconfig
22879 keywords.c
22880 ksym.c*
22881 ksym.h*
22882 @@ -154,7 +161,6 @@ kxgettext
22883 lkc_defs.h
22884 lex.c
22885 lex.*.c
22886 -linux
22887 logo_*.c
22888 logo_*_clut224.c
22889 logo_*_mono.c
22890 @@ -166,7 +172,6 @@ machtypes.h
22891 map
22892 map_hugetlb
22893 maui_boot.h
22894 -media
22895 mconf
22896 miboot*
22897 mk_elfconfig
22898 @@ -174,6 +179,7 @@ mkboot
22899 mkbugboot
22900 mkcpustr
22901 mkdep
22902 +mkpiggy
22903 mkprep
22904 mkregtable
22905 mktables
22906 @@ -209,6 +215,7 @@ r300_reg_safe.h
22907 r420_reg_safe.h
22908 r600_reg_safe.h
22909 recordmcount
22910 +regdb.c
22911 relocs
22912 rlim_names.h
22913 rn50_reg_safe.h
22914 @@ -219,6 +226,7 @@ setup
22915 setup.bin
22916 setup.elf
22917 sImage
22918 +slabinfo
22919 sm_tbl*
22920 split-include
22921 syscalltab.h
22922 @@ -246,7 +254,9 @@ vmlinux
22923 vmlinux-*
22924 vmlinux.aout
22925 vmlinux.bin.all
22926 +vmlinux.bin.bz2
22927 vmlinux.lds
22928 +vmlinux.relocs
22929 vmlinuz
22930 voffset.h
22931 vsyscall.lds
22932 @@ -254,6 +264,7 @@ vsyscall_32.lds
22933 wanxlfw.inc
22934 uImage
22935 unifdef
22936 +utsrelease.h
22937 wakeup.bin
22938 wakeup.elf
22939 wakeup.lds
22940 diff -urNp linux-3.0.7/Documentation/kernel-parameters.txt linux-3.0.7/Documentation/kernel-parameters.txt
22941 --- linux-3.0.7/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22942 +++ linux-3.0.7/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22943 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22944 the specified number of seconds. This is to be used if
22945 your oopses keep scrolling off the screen.
22946
22947 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22948 + virtualization environments that don't cope well with the
22949 + expand down segment used by UDEREF on X86-32 or the frequent
22950 + page table updates on X86-64.
22951 +
22952 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22953 +
22954 pcbit= [HW,ISDN]
22955
22956 pcd. [PARIDE]
22957 diff -urNp linux-3.0.7/drivers/acpi/apei/cper.c linux-3.0.7/drivers/acpi/apei/cper.c
22958 --- linux-3.0.7/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22959 +++ linux-3.0.7/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22960 @@ -38,12 +38,12 @@
22961 */
22962 u64 cper_next_record_id(void)
22963 {
22964 - static atomic64_t seq;
22965 + static atomic64_unchecked_t seq;
22966
22967 - if (!atomic64_read(&seq))
22968 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22969 + if (!atomic64_read_unchecked(&seq))
22970 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22971
22972 - return atomic64_inc_return(&seq);
22973 + return atomic64_inc_return_unchecked(&seq);
22974 }
22975 EXPORT_SYMBOL_GPL(cper_next_record_id);
22976
22977 diff -urNp linux-3.0.7/drivers/acpi/ec_sys.c linux-3.0.7/drivers/acpi/ec_sys.c
22978 --- linux-3.0.7/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22979 +++ linux-3.0.7/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22980 @@ -11,6 +11,7 @@
22981 #include <linux/kernel.h>
22982 #include <linux/acpi.h>
22983 #include <linux/debugfs.h>
22984 +#include <asm/uaccess.h>
22985 #include "internal.h"
22986
22987 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22988 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22989 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22990 */
22991 unsigned int size = EC_SPACE_SIZE;
22992 - u8 *data = (u8 *) buf;
22993 + u8 data;
22994 loff_t init_off = *off;
22995 int err = 0;
22996
22997 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22998 size = count;
22999
23000 while (size) {
23001 - err = ec_read(*off, &data[*off - init_off]);
23002 + err = ec_read(*off, &data);
23003 if (err)
23004 return err;
23005 + if (put_user(data, &buf[*off - init_off]))
23006 + return -EFAULT;
23007 *off += 1;
23008 size--;
23009 }
23010 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23011
23012 unsigned int size = count;
23013 loff_t init_off = *off;
23014 - u8 *data = (u8 *) buf;
23015 int err = 0;
23016
23017 if (*off >= EC_SPACE_SIZE)
23018 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23019 }
23020
23021 while (size) {
23022 - u8 byte_write = data[*off - init_off];
23023 + u8 byte_write;
23024 + if (get_user(byte_write, &buf[*off - init_off]))
23025 + return -EFAULT;
23026 err = ec_write(*off, byte_write);
23027 if (err)
23028 return err;
23029 diff -urNp linux-3.0.7/drivers/acpi/proc.c linux-3.0.7/drivers/acpi/proc.c
23030 --- linux-3.0.7/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
23031 +++ linux-3.0.7/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
23032 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23033 size_t count, loff_t * ppos)
23034 {
23035 struct list_head *node, *next;
23036 - char strbuf[5];
23037 - char str[5] = "";
23038 - unsigned int len = count;
23039 -
23040 - if (len > 4)
23041 - len = 4;
23042 - if (len < 0)
23043 - return -EFAULT;
23044 + char strbuf[5] = {0};
23045
23046 - if (copy_from_user(strbuf, buffer, len))
23047 + if (count > 4)
23048 + count = 4;
23049 + if (copy_from_user(strbuf, buffer, count))
23050 return -EFAULT;
23051 - strbuf[len] = '\0';
23052 - sscanf(strbuf, "%s", str);
23053 + strbuf[count] = '\0';
23054
23055 mutex_lock(&acpi_device_lock);
23056 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23057 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23058 if (!dev->wakeup.flags.valid)
23059 continue;
23060
23061 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23062 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23063 if (device_can_wakeup(&dev->dev)) {
23064 bool enable = !device_may_wakeup(&dev->dev);
23065 device_set_wakeup_enable(&dev->dev, enable);
23066 diff -urNp linux-3.0.7/drivers/acpi/processor_driver.c linux-3.0.7/drivers/acpi/processor_driver.c
23067 --- linux-3.0.7/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23068 +++ linux-3.0.7/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23069 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23070 return 0;
23071 #endif
23072
23073 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23074 + BUG_ON(pr->id >= nr_cpu_ids);
23075
23076 /*
23077 * Buggy BIOS check
23078 diff -urNp linux-3.0.7/drivers/ata/libata-core.c linux-3.0.7/drivers/ata/libata-core.c
23079 --- linux-3.0.7/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23080 +++ linux-3.0.7/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23081 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23082 struct ata_port *ap;
23083 unsigned int tag;
23084
23085 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23086 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23087 ap = qc->ap;
23088
23089 qc->flags = 0;
23090 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23091 struct ata_port *ap;
23092 struct ata_link *link;
23093
23094 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23095 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23096 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23097 ap = qc->ap;
23098 link = qc->dev->link;
23099 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23100 return;
23101
23102 spin_lock(&lock);
23103 + pax_open_kernel();
23104
23105 for (cur = ops->inherits; cur; cur = cur->inherits) {
23106 void **inherit = (void **)cur;
23107 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23108 if (IS_ERR(*pp))
23109 *pp = NULL;
23110
23111 - ops->inherits = NULL;
23112 + *(struct ata_port_operations **)&ops->inherits = NULL;
23113
23114 + pax_close_kernel();
23115 spin_unlock(&lock);
23116 }
23117
23118 diff -urNp linux-3.0.7/drivers/ata/libata-eh.c linux-3.0.7/drivers/ata/libata-eh.c
23119 --- linux-3.0.7/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23120 +++ linux-3.0.7/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23121 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23122 {
23123 struct ata_link *link;
23124
23125 + pax_track_stack();
23126 +
23127 ata_for_each_link(link, ap, HOST_FIRST)
23128 ata_eh_link_report(link);
23129 }
23130 diff -urNp linux-3.0.7/drivers/ata/pata_arasan_cf.c linux-3.0.7/drivers/ata/pata_arasan_cf.c
23131 --- linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23132 +++ linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23133 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23134 /* Handle platform specific quirks */
23135 if (pdata->quirk) {
23136 if (pdata->quirk & CF_BROKEN_PIO) {
23137 - ap->ops->set_piomode = NULL;
23138 + pax_open_kernel();
23139 + *(void **)&ap->ops->set_piomode = NULL;
23140 + pax_close_kernel();
23141 ap->pio_mask = 0;
23142 }
23143 if (pdata->quirk & CF_BROKEN_MWDMA)
23144 diff -urNp linux-3.0.7/drivers/atm/adummy.c linux-3.0.7/drivers/atm/adummy.c
23145 --- linux-3.0.7/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23146 +++ linux-3.0.7/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23147 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23148 vcc->pop(vcc, skb);
23149 else
23150 dev_kfree_skb_any(skb);
23151 - atomic_inc(&vcc->stats->tx);
23152 + atomic_inc_unchecked(&vcc->stats->tx);
23153
23154 return 0;
23155 }
23156 diff -urNp linux-3.0.7/drivers/atm/ambassador.c linux-3.0.7/drivers/atm/ambassador.c
23157 --- linux-3.0.7/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23158 +++ linux-3.0.7/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23159 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23160 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23161
23162 // VC layer stats
23163 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23164 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23165
23166 // free the descriptor
23167 kfree (tx_descr);
23168 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23169 dump_skb ("<<<", vc, skb);
23170
23171 // VC layer stats
23172 - atomic_inc(&atm_vcc->stats->rx);
23173 + atomic_inc_unchecked(&atm_vcc->stats->rx);
23174 __net_timestamp(skb);
23175 // end of our responsibility
23176 atm_vcc->push (atm_vcc, skb);
23177 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23178 } else {
23179 PRINTK (KERN_INFO, "dropped over-size frame");
23180 // should we count this?
23181 - atomic_inc(&atm_vcc->stats->rx_drop);
23182 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23183 }
23184
23185 } else {
23186 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23187 }
23188
23189 if (check_area (skb->data, skb->len)) {
23190 - atomic_inc(&atm_vcc->stats->tx_err);
23191 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23192 return -ENOMEM; // ?
23193 }
23194
23195 diff -urNp linux-3.0.7/drivers/atm/atmtcp.c linux-3.0.7/drivers/atm/atmtcp.c
23196 --- linux-3.0.7/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23197 +++ linux-3.0.7/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23198 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23199 if (vcc->pop) vcc->pop(vcc,skb);
23200 else dev_kfree_skb(skb);
23201 if (dev_data) return 0;
23202 - atomic_inc(&vcc->stats->tx_err);
23203 + atomic_inc_unchecked(&vcc->stats->tx_err);
23204 return -ENOLINK;
23205 }
23206 size = skb->len+sizeof(struct atmtcp_hdr);
23207 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23208 if (!new_skb) {
23209 if (vcc->pop) vcc->pop(vcc,skb);
23210 else dev_kfree_skb(skb);
23211 - atomic_inc(&vcc->stats->tx_err);
23212 + atomic_inc_unchecked(&vcc->stats->tx_err);
23213 return -ENOBUFS;
23214 }
23215 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23216 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23217 if (vcc->pop) vcc->pop(vcc,skb);
23218 else dev_kfree_skb(skb);
23219 out_vcc->push(out_vcc,new_skb);
23220 - atomic_inc(&vcc->stats->tx);
23221 - atomic_inc(&out_vcc->stats->rx);
23222 + atomic_inc_unchecked(&vcc->stats->tx);
23223 + atomic_inc_unchecked(&out_vcc->stats->rx);
23224 return 0;
23225 }
23226
23227 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23228 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23229 read_unlock(&vcc_sklist_lock);
23230 if (!out_vcc) {
23231 - atomic_inc(&vcc->stats->tx_err);
23232 + atomic_inc_unchecked(&vcc->stats->tx_err);
23233 goto done;
23234 }
23235 skb_pull(skb,sizeof(struct atmtcp_hdr));
23236 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23237 __net_timestamp(new_skb);
23238 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23239 out_vcc->push(out_vcc,new_skb);
23240 - atomic_inc(&vcc->stats->tx);
23241 - atomic_inc(&out_vcc->stats->rx);
23242 + atomic_inc_unchecked(&vcc->stats->tx);
23243 + atomic_inc_unchecked(&out_vcc->stats->rx);
23244 done:
23245 if (vcc->pop) vcc->pop(vcc,skb);
23246 else dev_kfree_skb(skb);
23247 diff -urNp linux-3.0.7/drivers/atm/eni.c linux-3.0.7/drivers/atm/eni.c
23248 --- linux-3.0.7/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23249 +++ linux-3.0.7/drivers/atm/eni.c 2011-10-11 10:44:33.000000000 -0400
23250 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23251 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23252 vcc->dev->number);
23253 length = 0;
23254 - atomic_inc(&vcc->stats->rx_err);
23255 + atomic_inc_unchecked(&vcc->stats->rx_err);
23256 }
23257 else {
23258 length = ATM_CELL_SIZE-1; /* no HEC */
23259 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23260 size);
23261 }
23262 eff = length = 0;
23263 - atomic_inc(&vcc->stats->rx_err);
23264 + atomic_inc_unchecked(&vcc->stats->rx_err);
23265 }
23266 else {
23267 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23268 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23269 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23270 vcc->dev->number,vcc->vci,length,size << 2,descr);
23271 length = eff = 0;
23272 - atomic_inc(&vcc->stats->rx_err);
23273 + atomic_inc_unchecked(&vcc->stats->rx_err);
23274 }
23275 }
23276 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23277 @@ -771,7 +771,7 @@ rx_dequeued++;
23278 vcc->push(vcc,skb);
23279 pushed++;
23280 }
23281 - atomic_inc(&vcc->stats->rx);
23282 + atomic_inc_unchecked(&vcc->stats->rx);
23283 }
23284 wake_up(&eni_dev->rx_wait);
23285 }
23286 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23287 PCI_DMA_TODEVICE);
23288 if (vcc->pop) vcc->pop(vcc,skb);
23289 else dev_kfree_skb_irq(skb);
23290 - atomic_inc(&vcc->stats->tx);
23291 + atomic_inc_unchecked(&vcc->stats->tx);
23292 wake_up(&eni_dev->tx_wait);
23293 dma_complete++;
23294 }
23295 @@ -1568,7 +1568,7 @@ tx_complete++;
23296 /*--------------------------------- entries ---------------------------------*/
23297
23298
23299 -static const char *media_name[] __devinitdata = {
23300 +static const char *media_name[] __devinitconst = {
23301 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23302 "UTP", "05?", "06?", "07?", /* 4- 7 */
23303 "TAXI","09?", "10?", "11?", /* 8-11 */
23304 diff -urNp linux-3.0.7/drivers/atm/firestream.c linux-3.0.7/drivers/atm/firestream.c
23305 --- linux-3.0.7/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23306 +++ linux-3.0.7/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23307 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23308 }
23309 }
23310
23311 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23312 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23313
23314 fs_dprintk (FS_DEBUG_TXMEM, "i");
23315 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23316 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23317 #endif
23318 skb_put (skb, qe->p1 & 0xffff);
23319 ATM_SKB(skb)->vcc = atm_vcc;
23320 - atomic_inc(&atm_vcc->stats->rx);
23321 + atomic_inc_unchecked(&atm_vcc->stats->rx);
23322 __net_timestamp(skb);
23323 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23324 atm_vcc->push (atm_vcc, skb);
23325 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23326 kfree (pe);
23327 }
23328 if (atm_vcc)
23329 - atomic_inc(&atm_vcc->stats->rx_drop);
23330 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23331 break;
23332 case 0x1f: /* Reassembly abort: no buffers. */
23333 /* Silently increment error counter. */
23334 if (atm_vcc)
23335 - atomic_inc(&atm_vcc->stats->rx_drop);
23336 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23337 break;
23338 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23339 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23340 diff -urNp linux-3.0.7/drivers/atm/fore200e.c linux-3.0.7/drivers/atm/fore200e.c
23341 --- linux-3.0.7/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23342 +++ linux-3.0.7/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23343 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23344 #endif
23345 /* check error condition */
23346 if (*entry->status & STATUS_ERROR)
23347 - atomic_inc(&vcc->stats->tx_err);
23348 + atomic_inc_unchecked(&vcc->stats->tx_err);
23349 else
23350 - atomic_inc(&vcc->stats->tx);
23351 + atomic_inc_unchecked(&vcc->stats->tx);
23352 }
23353 }
23354
23355 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23356 if (skb == NULL) {
23357 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23358
23359 - atomic_inc(&vcc->stats->rx_drop);
23360 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23361 return -ENOMEM;
23362 }
23363
23364 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23365
23366 dev_kfree_skb_any(skb);
23367
23368 - atomic_inc(&vcc->stats->rx_drop);
23369 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23370 return -ENOMEM;
23371 }
23372
23373 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23374
23375 vcc->push(vcc, skb);
23376 - atomic_inc(&vcc->stats->rx);
23377 + atomic_inc_unchecked(&vcc->stats->rx);
23378
23379 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23380
23381 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23382 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23383 fore200e->atm_dev->number,
23384 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23385 - atomic_inc(&vcc->stats->rx_err);
23386 + atomic_inc_unchecked(&vcc->stats->rx_err);
23387 }
23388 }
23389
23390 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23391 goto retry_here;
23392 }
23393
23394 - atomic_inc(&vcc->stats->tx_err);
23395 + atomic_inc_unchecked(&vcc->stats->tx_err);
23396
23397 fore200e->tx_sat++;
23398 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23399 diff -urNp linux-3.0.7/drivers/atm/he.c linux-3.0.7/drivers/atm/he.c
23400 --- linux-3.0.7/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23401 +++ linux-3.0.7/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23402 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23403
23404 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23405 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23406 - atomic_inc(&vcc->stats->rx_drop);
23407 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23408 goto return_host_buffers;
23409 }
23410
23411 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23412 RBRQ_LEN_ERR(he_dev->rbrq_head)
23413 ? "LEN_ERR" : "",
23414 vcc->vpi, vcc->vci);
23415 - atomic_inc(&vcc->stats->rx_err);
23416 + atomic_inc_unchecked(&vcc->stats->rx_err);
23417 goto return_host_buffers;
23418 }
23419
23420 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23421 vcc->push(vcc, skb);
23422 spin_lock(&he_dev->global_lock);
23423
23424 - atomic_inc(&vcc->stats->rx);
23425 + atomic_inc_unchecked(&vcc->stats->rx);
23426
23427 return_host_buffers:
23428 ++pdus_assembled;
23429 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23430 tpd->vcc->pop(tpd->vcc, tpd->skb);
23431 else
23432 dev_kfree_skb_any(tpd->skb);
23433 - atomic_inc(&tpd->vcc->stats->tx_err);
23434 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23435 }
23436 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23437 return;
23438 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23439 vcc->pop(vcc, skb);
23440 else
23441 dev_kfree_skb_any(skb);
23442 - atomic_inc(&vcc->stats->tx_err);
23443 + atomic_inc_unchecked(&vcc->stats->tx_err);
23444 return -EINVAL;
23445 }
23446
23447 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23448 vcc->pop(vcc, skb);
23449 else
23450 dev_kfree_skb_any(skb);
23451 - atomic_inc(&vcc->stats->tx_err);
23452 + atomic_inc_unchecked(&vcc->stats->tx_err);
23453 return -EINVAL;
23454 }
23455 #endif
23456 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23457 vcc->pop(vcc, skb);
23458 else
23459 dev_kfree_skb_any(skb);
23460 - atomic_inc(&vcc->stats->tx_err);
23461 + atomic_inc_unchecked(&vcc->stats->tx_err);
23462 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23463 return -ENOMEM;
23464 }
23465 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23466 vcc->pop(vcc, skb);
23467 else
23468 dev_kfree_skb_any(skb);
23469 - atomic_inc(&vcc->stats->tx_err);
23470 + atomic_inc_unchecked(&vcc->stats->tx_err);
23471 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23472 return -ENOMEM;
23473 }
23474 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23475 __enqueue_tpd(he_dev, tpd, cid);
23476 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23477
23478 - atomic_inc(&vcc->stats->tx);
23479 + atomic_inc_unchecked(&vcc->stats->tx);
23480
23481 return 0;
23482 }
23483 diff -urNp linux-3.0.7/drivers/atm/horizon.c linux-3.0.7/drivers/atm/horizon.c
23484 --- linux-3.0.7/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23485 +++ linux-3.0.7/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23486 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23487 {
23488 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23489 // VC layer stats
23490 - atomic_inc(&vcc->stats->rx);
23491 + atomic_inc_unchecked(&vcc->stats->rx);
23492 __net_timestamp(skb);
23493 // end of our responsibility
23494 vcc->push (vcc, skb);
23495 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23496 dev->tx_iovec = NULL;
23497
23498 // VC layer stats
23499 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23500 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23501
23502 // free the skb
23503 hrz_kfree_skb (skb);
23504 diff -urNp linux-3.0.7/drivers/atm/idt77252.c linux-3.0.7/drivers/atm/idt77252.c
23505 --- linux-3.0.7/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23506 +++ linux-3.0.7/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23507 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23508 else
23509 dev_kfree_skb(skb);
23510
23511 - atomic_inc(&vcc->stats->tx);
23512 + atomic_inc_unchecked(&vcc->stats->tx);
23513 }
23514
23515 atomic_dec(&scq->used);
23516 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23517 if ((sb = dev_alloc_skb(64)) == NULL) {
23518 printk("%s: Can't allocate buffers for aal0.\n",
23519 card->name);
23520 - atomic_add(i, &vcc->stats->rx_drop);
23521 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
23522 break;
23523 }
23524 if (!atm_charge(vcc, sb->truesize)) {
23525 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23526 card->name);
23527 - atomic_add(i - 1, &vcc->stats->rx_drop);
23528 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23529 dev_kfree_skb(sb);
23530 break;
23531 }
23532 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23533 ATM_SKB(sb)->vcc = vcc;
23534 __net_timestamp(sb);
23535 vcc->push(vcc, sb);
23536 - atomic_inc(&vcc->stats->rx);
23537 + atomic_inc_unchecked(&vcc->stats->rx);
23538
23539 cell += ATM_CELL_PAYLOAD;
23540 }
23541 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23542 "(CDC: %08x)\n",
23543 card->name, len, rpp->len, readl(SAR_REG_CDC));
23544 recycle_rx_pool_skb(card, rpp);
23545 - atomic_inc(&vcc->stats->rx_err);
23546 + atomic_inc_unchecked(&vcc->stats->rx_err);
23547 return;
23548 }
23549 if (stat & SAR_RSQE_CRC) {
23550 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23551 recycle_rx_pool_skb(card, rpp);
23552 - atomic_inc(&vcc->stats->rx_err);
23553 + atomic_inc_unchecked(&vcc->stats->rx_err);
23554 return;
23555 }
23556 if (skb_queue_len(&rpp->queue) > 1) {
23557 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23558 RXPRINTK("%s: Can't alloc RX skb.\n",
23559 card->name);
23560 recycle_rx_pool_skb(card, rpp);
23561 - atomic_inc(&vcc->stats->rx_err);
23562 + atomic_inc_unchecked(&vcc->stats->rx_err);
23563 return;
23564 }
23565 if (!atm_charge(vcc, skb->truesize)) {
23566 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23567 __net_timestamp(skb);
23568
23569 vcc->push(vcc, skb);
23570 - atomic_inc(&vcc->stats->rx);
23571 + atomic_inc_unchecked(&vcc->stats->rx);
23572
23573 return;
23574 }
23575 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23576 __net_timestamp(skb);
23577
23578 vcc->push(vcc, skb);
23579 - atomic_inc(&vcc->stats->rx);
23580 + atomic_inc_unchecked(&vcc->stats->rx);
23581
23582 if (skb->truesize > SAR_FB_SIZE_3)
23583 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23584 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23585 if (vcc->qos.aal != ATM_AAL0) {
23586 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23587 card->name, vpi, vci);
23588 - atomic_inc(&vcc->stats->rx_drop);
23589 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23590 goto drop;
23591 }
23592
23593 if ((sb = dev_alloc_skb(64)) == NULL) {
23594 printk("%s: Can't allocate buffers for AAL0.\n",
23595 card->name);
23596 - atomic_inc(&vcc->stats->rx_err);
23597 + atomic_inc_unchecked(&vcc->stats->rx_err);
23598 goto drop;
23599 }
23600
23601 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23602 ATM_SKB(sb)->vcc = vcc;
23603 __net_timestamp(sb);
23604 vcc->push(vcc, sb);
23605 - atomic_inc(&vcc->stats->rx);
23606 + atomic_inc_unchecked(&vcc->stats->rx);
23607
23608 drop:
23609 skb_pull(queue, 64);
23610 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23611
23612 if (vc == NULL) {
23613 printk("%s: NULL connection in send().\n", card->name);
23614 - atomic_inc(&vcc->stats->tx_err);
23615 + atomic_inc_unchecked(&vcc->stats->tx_err);
23616 dev_kfree_skb(skb);
23617 return -EINVAL;
23618 }
23619 if (!test_bit(VCF_TX, &vc->flags)) {
23620 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23621 - atomic_inc(&vcc->stats->tx_err);
23622 + atomic_inc_unchecked(&vcc->stats->tx_err);
23623 dev_kfree_skb(skb);
23624 return -EINVAL;
23625 }
23626 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23627 break;
23628 default:
23629 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23630 - atomic_inc(&vcc->stats->tx_err);
23631 + atomic_inc_unchecked(&vcc->stats->tx_err);
23632 dev_kfree_skb(skb);
23633 return -EINVAL;
23634 }
23635
23636 if (skb_shinfo(skb)->nr_frags != 0) {
23637 printk("%s: No scatter-gather yet.\n", card->name);
23638 - atomic_inc(&vcc->stats->tx_err);
23639 + atomic_inc_unchecked(&vcc->stats->tx_err);
23640 dev_kfree_skb(skb);
23641 return -EINVAL;
23642 }
23643 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23644
23645 err = queue_skb(card, vc, skb, oam);
23646 if (err) {
23647 - atomic_inc(&vcc->stats->tx_err);
23648 + atomic_inc_unchecked(&vcc->stats->tx_err);
23649 dev_kfree_skb(skb);
23650 return err;
23651 }
23652 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23653 skb = dev_alloc_skb(64);
23654 if (!skb) {
23655 printk("%s: Out of memory in send_oam().\n", card->name);
23656 - atomic_inc(&vcc->stats->tx_err);
23657 + atomic_inc_unchecked(&vcc->stats->tx_err);
23658 return -ENOMEM;
23659 }
23660 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23661 diff -urNp linux-3.0.7/drivers/atm/iphase.c linux-3.0.7/drivers/atm/iphase.c
23662 --- linux-3.0.7/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23663 +++ linux-3.0.7/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23664 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23665 status = (u_short) (buf_desc_ptr->desc_mode);
23666 if (status & (RX_CER | RX_PTE | RX_OFL))
23667 {
23668 - atomic_inc(&vcc->stats->rx_err);
23669 + atomic_inc_unchecked(&vcc->stats->rx_err);
23670 IF_ERR(printk("IA: bad packet, dropping it");)
23671 if (status & RX_CER) {
23672 IF_ERR(printk(" cause: packet CRC error\n");)
23673 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23674 len = dma_addr - buf_addr;
23675 if (len > iadev->rx_buf_sz) {
23676 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23677 - atomic_inc(&vcc->stats->rx_err);
23678 + atomic_inc_unchecked(&vcc->stats->rx_err);
23679 goto out_free_desc;
23680 }
23681
23682 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23683 ia_vcc = INPH_IA_VCC(vcc);
23684 if (ia_vcc == NULL)
23685 {
23686 - atomic_inc(&vcc->stats->rx_err);
23687 + atomic_inc_unchecked(&vcc->stats->rx_err);
23688 dev_kfree_skb_any(skb);
23689 atm_return(vcc, atm_guess_pdu2truesize(len));
23690 goto INCR_DLE;
23691 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23692 if ((length > iadev->rx_buf_sz) || (length >
23693 (skb->len - sizeof(struct cpcs_trailer))))
23694 {
23695 - atomic_inc(&vcc->stats->rx_err);
23696 + atomic_inc_unchecked(&vcc->stats->rx_err);
23697 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23698 length, skb->len);)
23699 dev_kfree_skb_any(skb);
23700 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23701
23702 IF_RX(printk("rx_dle_intr: skb push");)
23703 vcc->push(vcc,skb);
23704 - atomic_inc(&vcc->stats->rx);
23705 + atomic_inc_unchecked(&vcc->stats->rx);
23706 iadev->rx_pkt_cnt++;
23707 }
23708 INCR_DLE:
23709 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23710 {
23711 struct k_sonet_stats *stats;
23712 stats = &PRIV(_ia_dev[board])->sonet_stats;
23713 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23714 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23715 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23716 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23717 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23718 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23719 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23720 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23721 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23722 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23723 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23724 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23725 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23726 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23727 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23728 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23729 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23730 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23731 }
23732 ia_cmds.status = 0;
23733 break;
23734 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23735 if ((desc == 0) || (desc > iadev->num_tx_desc))
23736 {
23737 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23738 - atomic_inc(&vcc->stats->tx);
23739 + atomic_inc_unchecked(&vcc->stats->tx);
23740 if (vcc->pop)
23741 vcc->pop(vcc, skb);
23742 else
23743 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23744 ATM_DESC(skb) = vcc->vci;
23745 skb_queue_tail(&iadev->tx_dma_q, skb);
23746
23747 - atomic_inc(&vcc->stats->tx);
23748 + atomic_inc_unchecked(&vcc->stats->tx);
23749 iadev->tx_pkt_cnt++;
23750 /* Increment transaction counter */
23751 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23752
23753 #if 0
23754 /* add flow control logic */
23755 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23756 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23757 if (iavcc->vc_desc_cnt > 10) {
23758 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23759 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23760 diff -urNp linux-3.0.7/drivers/atm/lanai.c linux-3.0.7/drivers/atm/lanai.c
23761 --- linux-3.0.7/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23762 +++ linux-3.0.7/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23763 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23764 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23765 lanai_endtx(lanai, lvcc);
23766 lanai_free_skb(lvcc->tx.atmvcc, skb);
23767 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23768 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23769 }
23770
23771 /* Try to fill the buffer - don't call unless there is backlog */
23772 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23773 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23774 __net_timestamp(skb);
23775 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23776 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23777 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23778 out:
23779 lvcc->rx.buf.ptr = end;
23780 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23781 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23782 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23783 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23784 lanai->stats.service_rxnotaal5++;
23785 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23786 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23787 return 0;
23788 }
23789 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23790 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23791 int bytes;
23792 read_unlock(&vcc_sklist_lock);
23793 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23794 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23795 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23796 lvcc->stats.x.aal5.service_trash++;
23797 bytes = (SERVICE_GET_END(s) * 16) -
23798 (((unsigned long) lvcc->rx.buf.ptr) -
23799 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23800 }
23801 if (s & SERVICE_STREAM) {
23802 read_unlock(&vcc_sklist_lock);
23803 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23804 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23805 lvcc->stats.x.aal5.service_stream++;
23806 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23807 "PDU on VCI %d!\n", lanai->number, vci);
23808 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23809 return 0;
23810 }
23811 DPRINTK("got rx crc error on vci %d\n", vci);
23812 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23813 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23814 lvcc->stats.x.aal5.service_rxcrc++;
23815 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23816 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23817 diff -urNp linux-3.0.7/drivers/atm/nicstar.c linux-3.0.7/drivers/atm/nicstar.c
23818 --- linux-3.0.7/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23819 +++ linux-3.0.7/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23820 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23821 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23822 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23823 card->index);
23824 - atomic_inc(&vcc->stats->tx_err);
23825 + atomic_inc_unchecked(&vcc->stats->tx_err);
23826 dev_kfree_skb_any(skb);
23827 return -EINVAL;
23828 }
23829 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23830 if (!vc->tx) {
23831 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23832 card->index);
23833 - atomic_inc(&vcc->stats->tx_err);
23834 + atomic_inc_unchecked(&vcc->stats->tx_err);
23835 dev_kfree_skb_any(skb);
23836 return -EINVAL;
23837 }
23838 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23839 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23840 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23841 card->index);
23842 - atomic_inc(&vcc->stats->tx_err);
23843 + atomic_inc_unchecked(&vcc->stats->tx_err);
23844 dev_kfree_skb_any(skb);
23845 return -EINVAL;
23846 }
23847
23848 if (skb_shinfo(skb)->nr_frags != 0) {
23849 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23850 - atomic_inc(&vcc->stats->tx_err);
23851 + atomic_inc_unchecked(&vcc->stats->tx_err);
23852 dev_kfree_skb_any(skb);
23853 return -EINVAL;
23854 }
23855 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23856 }
23857
23858 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23859 - atomic_inc(&vcc->stats->tx_err);
23860 + atomic_inc_unchecked(&vcc->stats->tx_err);
23861 dev_kfree_skb_any(skb);
23862 return -EIO;
23863 }
23864 - atomic_inc(&vcc->stats->tx);
23865 + atomic_inc_unchecked(&vcc->stats->tx);
23866
23867 return 0;
23868 }
23869 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23870 printk
23871 ("nicstar%d: Can't allocate buffers for aal0.\n",
23872 card->index);
23873 - atomic_add(i, &vcc->stats->rx_drop);
23874 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
23875 break;
23876 }
23877 if (!atm_charge(vcc, sb->truesize)) {
23878 RXPRINTK
23879 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23880 card->index);
23881 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23882 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23883 dev_kfree_skb_any(sb);
23884 break;
23885 }
23886 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23887 ATM_SKB(sb)->vcc = vcc;
23888 __net_timestamp(sb);
23889 vcc->push(vcc, sb);
23890 - atomic_inc(&vcc->stats->rx);
23891 + atomic_inc_unchecked(&vcc->stats->rx);
23892 cell += ATM_CELL_PAYLOAD;
23893 }
23894
23895 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23896 if (iovb == NULL) {
23897 printk("nicstar%d: Out of iovec buffers.\n",
23898 card->index);
23899 - atomic_inc(&vcc->stats->rx_drop);
23900 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23901 recycle_rx_buf(card, skb);
23902 return;
23903 }
23904 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23905 small or large buffer itself. */
23906 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23907 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23908 - atomic_inc(&vcc->stats->rx_err);
23909 + atomic_inc_unchecked(&vcc->stats->rx_err);
23910 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23911 NS_MAX_IOVECS);
23912 NS_PRV_IOVCNT(iovb) = 0;
23913 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23914 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23915 card->index);
23916 which_list(card, skb);
23917 - atomic_inc(&vcc->stats->rx_err);
23918 + atomic_inc_unchecked(&vcc->stats->rx_err);
23919 recycle_rx_buf(card, skb);
23920 vc->rx_iov = NULL;
23921 recycle_iov_buf(card, iovb);
23922 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23923 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23924 card->index);
23925 which_list(card, skb);
23926 - atomic_inc(&vcc->stats->rx_err);
23927 + atomic_inc_unchecked(&vcc->stats->rx_err);
23928 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23929 NS_PRV_IOVCNT(iovb));
23930 vc->rx_iov = NULL;
23931 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23932 printk(" - PDU size mismatch.\n");
23933 else
23934 printk(".\n");
23935 - atomic_inc(&vcc->stats->rx_err);
23936 + atomic_inc_unchecked(&vcc->stats->rx_err);
23937 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23938 NS_PRV_IOVCNT(iovb));
23939 vc->rx_iov = NULL;
23940 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23941 /* skb points to a small buffer */
23942 if (!atm_charge(vcc, skb->truesize)) {
23943 push_rxbufs(card, skb);
23944 - atomic_inc(&vcc->stats->rx_drop);
23945 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23946 } else {
23947 skb_put(skb, len);
23948 dequeue_sm_buf(card, skb);
23949 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23950 ATM_SKB(skb)->vcc = vcc;
23951 __net_timestamp(skb);
23952 vcc->push(vcc, skb);
23953 - atomic_inc(&vcc->stats->rx);
23954 + atomic_inc_unchecked(&vcc->stats->rx);
23955 }
23956 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23957 struct sk_buff *sb;
23958 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23959 if (len <= NS_SMBUFSIZE) {
23960 if (!atm_charge(vcc, sb->truesize)) {
23961 push_rxbufs(card, sb);
23962 - atomic_inc(&vcc->stats->rx_drop);
23963 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23964 } else {
23965 skb_put(sb, len);
23966 dequeue_sm_buf(card, sb);
23967 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23968 ATM_SKB(sb)->vcc = vcc;
23969 __net_timestamp(sb);
23970 vcc->push(vcc, sb);
23971 - atomic_inc(&vcc->stats->rx);
23972 + atomic_inc_unchecked(&vcc->stats->rx);
23973 }
23974
23975 push_rxbufs(card, skb);
23976 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23977
23978 if (!atm_charge(vcc, skb->truesize)) {
23979 push_rxbufs(card, skb);
23980 - atomic_inc(&vcc->stats->rx_drop);
23981 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23982 } else {
23983 dequeue_lg_buf(card, skb);
23984 #ifdef NS_USE_DESTRUCTORS
23985 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23986 ATM_SKB(skb)->vcc = vcc;
23987 __net_timestamp(skb);
23988 vcc->push(vcc, skb);
23989 - atomic_inc(&vcc->stats->rx);
23990 + atomic_inc_unchecked(&vcc->stats->rx);
23991 }
23992
23993 push_rxbufs(card, sb);
23994 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23995 printk
23996 ("nicstar%d: Out of huge buffers.\n",
23997 card->index);
23998 - atomic_inc(&vcc->stats->rx_drop);
23999 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24000 recycle_iovec_rx_bufs(card,
24001 (struct iovec *)
24002 iovb->data,
24003 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24004 card->hbpool.count++;
24005 } else
24006 dev_kfree_skb_any(hb);
24007 - atomic_inc(&vcc->stats->rx_drop);
24008 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24009 } else {
24010 /* Copy the small buffer to the huge buffer */
24011 sb = (struct sk_buff *)iov->iov_base;
24012 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24013 #endif /* NS_USE_DESTRUCTORS */
24014 __net_timestamp(hb);
24015 vcc->push(vcc, hb);
24016 - atomic_inc(&vcc->stats->rx);
24017 + atomic_inc_unchecked(&vcc->stats->rx);
24018 }
24019 }
24020
24021 diff -urNp linux-3.0.7/drivers/atm/solos-pci.c linux-3.0.7/drivers/atm/solos-pci.c
24022 --- linux-3.0.7/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
24023 +++ linux-3.0.7/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
24024 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24025 }
24026 atm_charge(vcc, skb->truesize);
24027 vcc->push(vcc, skb);
24028 - atomic_inc(&vcc->stats->rx);
24029 + atomic_inc_unchecked(&vcc->stats->rx);
24030 break;
24031
24032 case PKT_STATUS:
24033 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24034 char msg[500];
24035 char item[10];
24036
24037 + pax_track_stack();
24038 +
24039 len = buf->len;
24040 for (i = 0; i < len; i++){
24041 if(i % 8 == 0)
24042 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24043 vcc = SKB_CB(oldskb)->vcc;
24044
24045 if (vcc) {
24046 - atomic_inc(&vcc->stats->tx);
24047 + atomic_inc_unchecked(&vcc->stats->tx);
24048 solos_pop(vcc, oldskb);
24049 } else
24050 dev_kfree_skb_irq(oldskb);
24051 diff -urNp linux-3.0.7/drivers/atm/suni.c linux-3.0.7/drivers/atm/suni.c
24052 --- linux-3.0.7/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24053 +++ linux-3.0.7/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24054 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24055
24056
24057 #define ADD_LIMITED(s,v) \
24058 - atomic_add((v),&stats->s); \
24059 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24060 + atomic_add_unchecked((v),&stats->s); \
24061 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24062
24063
24064 static void suni_hz(unsigned long from_timer)
24065 diff -urNp linux-3.0.7/drivers/atm/uPD98402.c linux-3.0.7/drivers/atm/uPD98402.c
24066 --- linux-3.0.7/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24067 +++ linux-3.0.7/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24068 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24069 struct sonet_stats tmp;
24070 int error = 0;
24071
24072 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24073 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24074 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24075 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24076 if (zero && !error) {
24077 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24078
24079
24080 #define ADD_LIMITED(s,v) \
24081 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24082 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24083 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24084 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24085 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24086 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24087
24088
24089 static void stat_event(struct atm_dev *dev)
24090 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24091 if (reason & uPD98402_INT_PFM) stat_event(dev);
24092 if (reason & uPD98402_INT_PCO) {
24093 (void) GET(PCOCR); /* clear interrupt cause */
24094 - atomic_add(GET(HECCT),
24095 + atomic_add_unchecked(GET(HECCT),
24096 &PRIV(dev)->sonet_stats.uncorr_hcs);
24097 }
24098 if ((reason & uPD98402_INT_RFO) &&
24099 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24100 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24101 uPD98402_INT_LOS),PIMR); /* enable them */
24102 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24103 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24104 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24105 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24106 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24107 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24108 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24109 return 0;
24110 }
24111
24112 diff -urNp linux-3.0.7/drivers/atm/zatm.c linux-3.0.7/drivers/atm/zatm.c
24113 --- linux-3.0.7/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24114 +++ linux-3.0.7/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24115 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24116 }
24117 if (!size) {
24118 dev_kfree_skb_irq(skb);
24119 - if (vcc) atomic_inc(&vcc->stats->rx_err);
24120 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24121 continue;
24122 }
24123 if (!atm_charge(vcc,skb->truesize)) {
24124 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24125 skb->len = size;
24126 ATM_SKB(skb)->vcc = vcc;
24127 vcc->push(vcc,skb);
24128 - atomic_inc(&vcc->stats->rx);
24129 + atomic_inc_unchecked(&vcc->stats->rx);
24130 }
24131 zout(pos & 0xffff,MTA(mbx));
24132 #if 0 /* probably a stupid idea */
24133 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24134 skb_queue_head(&zatm_vcc->backlog,skb);
24135 break;
24136 }
24137 - atomic_inc(&vcc->stats->tx);
24138 + atomic_inc_unchecked(&vcc->stats->tx);
24139 wake_up(&zatm_vcc->tx_wait);
24140 }
24141
24142 diff -urNp linux-3.0.7/drivers/base/devtmpfs.c linux-3.0.7/drivers/base/devtmpfs.c
24143 --- linux-3.0.7/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24144 +++ linux-3.0.7/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24145 @@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24146 if (!dev_mnt)
24147 return 0;
24148
24149 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24150 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24151 if (err)
24152 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24153 else
24154 diff -urNp linux-3.0.7/drivers/base/power/wakeup.c linux-3.0.7/drivers/base/power/wakeup.c
24155 --- linux-3.0.7/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24156 +++ linux-3.0.7/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24157 @@ -29,14 +29,14 @@ bool events_check_enabled;
24158 * They need to be modified together atomically, so it's better to use one
24159 * atomic variable to hold them both.
24160 */
24161 -static atomic_t combined_event_count = ATOMIC_INIT(0);
24162 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24163
24164 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24165 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24166
24167 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24168 {
24169 - unsigned int comb = atomic_read(&combined_event_count);
24170 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
24171
24172 *cnt = (comb >> IN_PROGRESS_BITS);
24173 *inpr = comb & MAX_IN_PROGRESS;
24174 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24175 ws->last_time = ktime_get();
24176
24177 /* Increment the counter of events in progress. */
24178 - atomic_inc(&combined_event_count);
24179 + atomic_inc_unchecked(&combined_event_count);
24180 }
24181
24182 /**
24183 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24184 * Increment the counter of registered wakeup events and decrement the
24185 * couter of wakeup events in progress simultaneously.
24186 */
24187 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24188 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24189 }
24190
24191 /**
24192 diff -urNp linux-3.0.7/drivers/block/cciss.c linux-3.0.7/drivers/block/cciss.c
24193 --- linux-3.0.7/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24194 +++ linux-3.0.7/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24195 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24196 int err;
24197 u32 cp;
24198
24199 + memset(&arg64, 0, sizeof(arg64));
24200 +
24201 err = 0;
24202 err |=
24203 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24204 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24205 while (!list_empty(&h->reqQ)) {
24206 c = list_entry(h->reqQ.next, CommandList_struct, list);
24207 /* can't do anything if fifo is full */
24208 - if ((h->access.fifo_full(h))) {
24209 + if ((h->access->fifo_full(h))) {
24210 dev_warn(&h->pdev->dev, "fifo full\n");
24211 break;
24212 }
24213 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24214 h->Qdepth--;
24215
24216 /* Tell the controller execute command */
24217 - h->access.submit_command(h, c);
24218 + h->access->submit_command(h, c);
24219
24220 /* Put job onto the completed Q */
24221 addQ(&h->cmpQ, c);
24222 @@ -3422,17 +3424,17 @@ startio:
24223
24224 static inline unsigned long get_next_completion(ctlr_info_t *h)
24225 {
24226 - return h->access.command_completed(h);
24227 + return h->access->command_completed(h);
24228 }
24229
24230 static inline int interrupt_pending(ctlr_info_t *h)
24231 {
24232 - return h->access.intr_pending(h);
24233 + return h->access->intr_pending(h);
24234 }
24235
24236 static inline long interrupt_not_for_us(ctlr_info_t *h)
24237 {
24238 - return ((h->access.intr_pending(h) == 0) ||
24239 + return ((h->access->intr_pending(h) == 0) ||
24240 (h->interrupts_enabled == 0));
24241 }
24242
24243 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24244 u32 a;
24245
24246 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24247 - return h->access.command_completed(h);
24248 + return h->access->command_completed(h);
24249
24250 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24251 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24252 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24253 trans_support & CFGTBL_Trans_use_short_tags);
24254
24255 /* Change the access methods to the performant access methods */
24256 - h->access = SA5_performant_access;
24257 + h->access = &SA5_performant_access;
24258 h->transMethod = CFGTBL_Trans_Performant;
24259
24260 return;
24261 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24262 if (prod_index < 0)
24263 return -ENODEV;
24264 h->product_name = products[prod_index].product_name;
24265 - h->access = *(products[prod_index].access);
24266 + h->access = products[prod_index].access;
24267
24268 if (cciss_board_disabled(h)) {
24269 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24270 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24271 }
24272
24273 /* make sure the board interrupts are off */
24274 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24275 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24276 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24277 if (rc)
24278 goto clean2;
24279 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24280 * fake ones to scoop up any residual completions.
24281 */
24282 spin_lock_irqsave(&h->lock, flags);
24283 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24284 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24285 spin_unlock_irqrestore(&h->lock, flags);
24286 free_irq(h->intr[PERF_MODE_INT], h);
24287 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24288 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24289 dev_info(&h->pdev->dev, "Board READY.\n");
24290 dev_info(&h->pdev->dev,
24291 "Waiting for stale completions to drain.\n");
24292 - h->access.set_intr_mask(h, CCISS_INTR_ON);
24293 + h->access->set_intr_mask(h, CCISS_INTR_ON);
24294 msleep(10000);
24295 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24296 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24297
24298 rc = controller_reset_failed(h->cfgtable);
24299 if (rc)
24300 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24301 cciss_scsi_setup(h);
24302
24303 /* Turn the interrupts on so we can service requests */
24304 - h->access.set_intr_mask(h, CCISS_INTR_ON);
24305 + h->access->set_intr_mask(h, CCISS_INTR_ON);
24306
24307 /* Get the firmware version */
24308 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24309 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24310 kfree(flush_buf);
24311 if (return_code != IO_OK)
24312 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24313 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24314 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24315 free_irq(h->intr[PERF_MODE_INT], h);
24316 }
24317
24318 diff -urNp linux-3.0.7/drivers/block/cciss.h linux-3.0.7/drivers/block/cciss.h
24319 --- linux-3.0.7/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24320 +++ linux-3.0.7/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24321 @@ -100,7 +100,7 @@ struct ctlr_info
24322 /* information about each logical volume */
24323 drive_info_struct *drv[CISS_MAX_LUN];
24324
24325 - struct access_method access;
24326 + struct access_method *access;
24327
24328 /* queue and queue Info */
24329 struct list_head reqQ;
24330 diff -urNp linux-3.0.7/drivers/block/cpqarray.c linux-3.0.7/drivers/block/cpqarray.c
24331 --- linux-3.0.7/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24332 +++ linux-3.0.7/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24333 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24334 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24335 goto Enomem4;
24336 }
24337 - hba[i]->access.set_intr_mask(hba[i], 0);
24338 + hba[i]->access->set_intr_mask(hba[i], 0);
24339 if (request_irq(hba[i]->intr, do_ida_intr,
24340 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24341 {
24342 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24343 add_timer(&hba[i]->timer);
24344
24345 /* Enable IRQ now that spinlock and rate limit timer are set up */
24346 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24347 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24348
24349 for(j=0; j<NWD; j++) {
24350 struct gendisk *disk = ida_gendisk[i][j];
24351 @@ -694,7 +694,7 @@ DBGINFO(
24352 for(i=0; i<NR_PRODUCTS; i++) {
24353 if (board_id == products[i].board_id) {
24354 c->product_name = products[i].product_name;
24355 - c->access = *(products[i].access);
24356 + c->access = products[i].access;
24357 break;
24358 }
24359 }
24360 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24361 hba[ctlr]->intr = intr;
24362 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24363 hba[ctlr]->product_name = products[j].product_name;
24364 - hba[ctlr]->access = *(products[j].access);
24365 + hba[ctlr]->access = products[j].access;
24366 hba[ctlr]->ctlr = ctlr;
24367 hba[ctlr]->board_id = board_id;
24368 hba[ctlr]->pci_dev = NULL; /* not PCI */
24369 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24370 struct scatterlist tmp_sg[SG_MAX];
24371 int i, dir, seg;
24372
24373 + pax_track_stack();
24374 +
24375 queue_next:
24376 creq = blk_peek_request(q);
24377 if (!creq)
24378 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24379
24380 while((c = h->reqQ) != NULL) {
24381 /* Can't do anything if we're busy */
24382 - if (h->access.fifo_full(h) == 0)
24383 + if (h->access->fifo_full(h) == 0)
24384 return;
24385
24386 /* Get the first entry from the request Q */
24387 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24388 h->Qdepth--;
24389
24390 /* Tell the controller to do our bidding */
24391 - h->access.submit_command(h, c);
24392 + h->access->submit_command(h, c);
24393
24394 /* Get onto the completion Q */
24395 addQ(&h->cmpQ, c);
24396 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24397 unsigned long flags;
24398 __u32 a,a1;
24399
24400 - istat = h->access.intr_pending(h);
24401 + istat = h->access->intr_pending(h);
24402 /* Is this interrupt for us? */
24403 if (istat == 0)
24404 return IRQ_NONE;
24405 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24406 */
24407 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24408 if (istat & FIFO_NOT_EMPTY) {
24409 - while((a = h->access.command_completed(h))) {
24410 + while((a = h->access->command_completed(h))) {
24411 a1 = a; a &= ~3;
24412 if ((c = h->cmpQ) == NULL)
24413 {
24414 @@ -1449,11 +1451,11 @@ static int sendcmd(
24415 /*
24416 * Disable interrupt
24417 */
24418 - info_p->access.set_intr_mask(info_p, 0);
24419 + info_p->access->set_intr_mask(info_p, 0);
24420 /* Make sure there is room in the command FIFO */
24421 /* Actually it should be completely empty at this time. */
24422 for (i = 200000; i > 0; i--) {
24423 - temp = info_p->access.fifo_full(info_p);
24424 + temp = info_p->access->fifo_full(info_p);
24425 if (temp != 0) {
24426 break;
24427 }
24428 @@ -1466,7 +1468,7 @@ DBG(
24429 /*
24430 * Send the cmd
24431 */
24432 - info_p->access.submit_command(info_p, c);
24433 + info_p->access->submit_command(info_p, c);
24434 complete = pollcomplete(ctlr);
24435
24436 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24437 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24438 * we check the new geometry. Then turn interrupts back on when
24439 * we're done.
24440 */
24441 - host->access.set_intr_mask(host, 0);
24442 + host->access->set_intr_mask(host, 0);
24443 getgeometry(ctlr);
24444 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24445 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24446
24447 for(i=0; i<NWD; i++) {
24448 struct gendisk *disk = ida_gendisk[ctlr][i];
24449 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24450 /* Wait (up to 2 seconds) for a command to complete */
24451
24452 for (i = 200000; i > 0; i--) {
24453 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
24454 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
24455 if (done == 0) {
24456 udelay(10); /* a short fixed delay */
24457 } else
24458 diff -urNp linux-3.0.7/drivers/block/cpqarray.h linux-3.0.7/drivers/block/cpqarray.h
24459 --- linux-3.0.7/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24460 +++ linux-3.0.7/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24461 @@ -99,7 +99,7 @@ struct ctlr_info {
24462 drv_info_t drv[NWD];
24463 struct proc_dir_entry *proc;
24464
24465 - struct access_method access;
24466 + struct access_method *access;
24467
24468 cmdlist_t *reqQ;
24469 cmdlist_t *cmpQ;
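
The cpqarray hunks above all follow from this one-line header change: keeping a pointer to the access_method table instead of a per-controller copy apparently lets the board-specific method tables stay const and sit in read-only memory, which is why every call site becomes h->access->... A minimal userspace sketch of the idea; board_ops, demo_ctlr and the demo_* functions are invented names, not taken from the driver.

#include <stdio.h>

/* A table of function pointers, analogous to access_method. */
struct board_ops {
    void (*submit)(int cmd);
    int  (*fifo_full)(void);
};

static void demo_submit(int cmd)  { printf("submit %d\n", cmd); }
static int  demo_fifo_full(void)  { return 0; }

/* const: the compiler places this table in .rodata, so it cannot be
 * scribbled over at run time the way an embedded per-instance copy could. */
static const struct board_ops demo_ops = {
    .submit    = demo_submit,
    .fifo_full = demo_fifo_full,
};

struct demo_ctlr {
    const struct board_ops *access;    /* pointer, as in the patched header */
};

int main(void)
{
    struct demo_ctlr h = { .access = &demo_ops };

    if (!h.access->fifo_full())        /* call sites read h->access->... */
        h.access->submit(42);
    return 0;
}
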
24470 diff -urNp linux-3.0.7/drivers/block/DAC960.c linux-3.0.7/drivers/block/DAC960.c
24471 --- linux-3.0.7/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24472 +++ linux-3.0.7/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24473 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24474 unsigned long flags;
24475 int Channel, TargetID;
24476
24477 + pax_track_stack();
24478 +
24479 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24480 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24481 sizeof(DAC960_SCSI_Inquiry_T) +
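
The pax_track_stack() calls added here and at several later points in the patch appear to belong to PaX's kernel-stack hardening: functions with unusually large stack frames record how deep they reach so the used portion of the stack can be sanitized later. A rough userspace model of a stack high-water mark only; track_stack(), stack_low_water and big_frame() are invented names and this is not the PaX implementation.

#include <stdio.h>
#include <stdint.h>

/* Lowest stack address observed so far (assumes a downward-growing stack). */
static uintptr_t stack_low_water = UINTPTR_MAX;

/* Record how deep the current frame reaches. */
static void track_stack(void)
{
    uintptr_t sp = (uintptr_t)__builtin_frame_address(0);

    if (sp < stack_low_water)
        stack_low_water = sp;
}

/* A function with a deliberately large frame, like the driver paths
 * the patch instruments. */
static void big_frame(void)
{
    volatile char scratch[4096];

    track_stack();
    scratch[0] = 0;
    (void)scratch;
}

int main(void)
{
    uintptr_t top = (uintptr_t)__builtin_frame_address(0);

    big_frame();
    printf("roughly %lu bytes of stack were touched\n",
           (unsigned long)(top - stack_low_water));
    return 0;
}
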
24482 diff -urNp linux-3.0.7/drivers/block/drbd/drbd_int.h linux-3.0.7/drivers/block/drbd/drbd_int.h
24483 --- linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24484 +++ linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24485 @@ -737,7 +737,7 @@ struct drbd_request;
24486 struct drbd_epoch {
24487 struct list_head list;
24488 unsigned int barrier_nr;
24489 - atomic_t epoch_size; /* increased on every request added. */
24490 + atomic_unchecked_t epoch_size; /* increased on every request added. */
24491 atomic_t active; /* increased on every req. added, and dec on every finished. */
24492 unsigned long flags;
24493 };
24494 @@ -1109,7 +1109,7 @@ struct drbd_conf {
24495 void *int_dig_in;
24496 void *int_dig_vv;
24497 wait_queue_head_t seq_wait;
24498 - atomic_t packet_seq;
24499 + atomic_unchecked_t packet_seq;
24500 unsigned int peer_seq;
24501 spinlock_t peer_seq_lock;
24502 unsigned int minor;
24503 @@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24504
24505 static inline void drbd_tcp_cork(struct socket *sock)
24506 {
24507 - int __user val = 1;
24508 + int val = 1;
24509 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24510 - (char __user *)&val, sizeof(val));
24511 + (char __force_user *)&val, sizeof(val));
24512 }
24513
24514 static inline void drbd_tcp_uncork(struct socket *sock)
24515 {
24516 - int __user val = 0;
24517 + int val = 0;
24518 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24519 - (char __user *)&val, sizeof(val));
24520 + (char __force_user *)&val, sizeof(val));
24521 }
24522
24523 static inline void drbd_tcp_nodelay(struct socket *sock)
24524 {
24525 - int __user val = 1;
24526 + int val = 1;
24527 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24528 - (char __user *)&val, sizeof(val));
24529 + (char __force_user *)&val, sizeof(val));
24530 }
24531
24532 static inline void drbd_tcp_quickack(struct socket *sock)
24533 {
24534 - int __user val = 2;
24535 + int val = 2;
24536 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24537 - (char __user *)&val, sizeof(val));
24538 + (char __force_user *)&val, sizeof(val));
24539 }
24540
24541 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
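
The atomic_t to atomic_unchecked_t switches in this header (and the matching *_unchecked() calls in the drbd .c hunks below) track PaX's reference-count overflow protection: with that feature enabled, plain atomic_t arithmetic is checked for wraparound, so counters whose wraparound is harmless, such as a packet sequence number or an epoch size, are moved to an unchecked variant to avoid false positives. A small userspace sketch of the distinction; checked_inc() and unchecked_inc() are invented stand-ins, not the kernel primitives.

#include <stdio.h>
#include <limits.h>

/* Checked increment: refuses to wrap, mirroring what an
 * overflow-protected atomic_inc() guards against. */
static int checked_inc(int *v)
{
    int next;

    if (__builtin_add_overflow(*v, 1, &next))
        return -1;          /* would wrap: treat as an error */
    *v = next;
    return 0;
}

/* Unchecked increment: wraparound is accepted, which is fine for
 * counters where only the delta or ordering matters. */
static unsigned int unchecked_inc(unsigned int *v)
{
    return ++*v;            /* unsigned wraparound is well defined */
}

int main(void)
{
    int refcount = INT_MAX;
    unsigned int seq = UINT_MAX;

    if (checked_inc(&refcount) < 0)
        printf("refcount increment rejected at INT_MAX\n");

    unchecked_inc(&seq);
    printf("sequence number wrapped to %u\n", seq);
    return 0;
}
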
24542 diff -urNp linux-3.0.7/drivers/block/drbd/drbd_main.c linux-3.0.7/drivers/block/drbd/drbd_main.c
24543 --- linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24544 +++ linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24545 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24546 p.sector = sector;
24547 p.block_id = block_id;
24548 p.blksize = blksize;
24549 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24550 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24551
24552 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24553 return false;
24554 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24555 p.sector = cpu_to_be64(req->sector);
24556 p.block_id = (unsigned long)req;
24557 p.seq_num = cpu_to_be32(req->seq_num =
24558 - atomic_add_return(1, &mdev->packet_seq));
24559 + atomic_add_return_unchecked(1, &mdev->packet_seq));
24560
24561 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24562
24563 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24564 atomic_set(&mdev->unacked_cnt, 0);
24565 atomic_set(&mdev->local_cnt, 0);
24566 atomic_set(&mdev->net_cnt, 0);
24567 - atomic_set(&mdev->packet_seq, 0);
24568 + atomic_set_unchecked(&mdev->packet_seq, 0);
24569 atomic_set(&mdev->pp_in_use, 0);
24570 atomic_set(&mdev->pp_in_use_by_net, 0);
24571 atomic_set(&mdev->rs_sect_in, 0);
24572 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24573 mdev->receiver.t_state);
24574
24575 /* no need to lock it, I'm the only thread alive */
24576 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24577 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24578 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24579 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24580 mdev->al_writ_cnt =
24581 mdev->bm_writ_cnt =
24582 mdev->read_cnt =
24583 diff -urNp linux-3.0.7/drivers/block/drbd/drbd_nl.c linux-3.0.7/drivers/block/drbd/drbd_nl.c
24584 --- linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24585 +++ linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24586 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24587 module_put(THIS_MODULE);
24588 }
24589
24590 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24591 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24592
24593 static unsigned short *
24594 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24595 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24596 cn_reply->id.idx = CN_IDX_DRBD;
24597 cn_reply->id.val = CN_VAL_DRBD;
24598
24599 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24600 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24601 cn_reply->ack = 0; /* not used here. */
24602 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24603 (int)((char *)tl - (char *)reply->tag_list);
24604 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24605 cn_reply->id.idx = CN_IDX_DRBD;
24606 cn_reply->id.val = CN_VAL_DRBD;
24607
24608 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24609 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24610 cn_reply->ack = 0; /* not used here. */
24611 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24612 (int)((char *)tl - (char *)reply->tag_list);
24613 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24614 cn_reply->id.idx = CN_IDX_DRBD;
24615 cn_reply->id.val = CN_VAL_DRBD;
24616
24617 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24618 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24619 cn_reply->ack = 0; // not used here.
24620 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24621 (int)((char*)tl - (char*)reply->tag_list);
24622 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24623 cn_reply->id.idx = CN_IDX_DRBD;
24624 cn_reply->id.val = CN_VAL_DRBD;
24625
24626 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24627 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24628 cn_reply->ack = 0; /* not used here. */
24629 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24630 (int)((char *)tl - (char *)reply->tag_list);
24631 diff -urNp linux-3.0.7/drivers/block/drbd/drbd_receiver.c linux-3.0.7/drivers/block/drbd/drbd_receiver.c
24632 --- linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24633 +++ linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24634 @@ -894,7 +894,7 @@ retry:
24635 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24636 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24637
24638 - atomic_set(&mdev->packet_seq, 0);
24639 + atomic_set_unchecked(&mdev->packet_seq, 0);
24640 mdev->peer_seq = 0;
24641
24642 drbd_thread_start(&mdev->asender);
24643 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24644 do {
24645 next_epoch = NULL;
24646
24647 - epoch_size = atomic_read(&epoch->epoch_size);
24648 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24649
24650 switch (ev & ~EV_CLEANUP) {
24651 case EV_PUT:
24652 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24653 rv = FE_DESTROYED;
24654 } else {
24655 epoch->flags = 0;
24656 - atomic_set(&epoch->epoch_size, 0);
24657 + atomic_set_unchecked(&epoch->epoch_size, 0);
24658 /* atomic_set(&epoch->active, 0); is already zero */
24659 if (rv == FE_STILL_LIVE)
24660 rv = FE_RECYCLED;
24661 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24662 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24663 drbd_flush(mdev);
24664
24665 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
24666 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24667 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24668 if (epoch)
24669 break;
24670 }
24671
24672 epoch = mdev->current_epoch;
24673 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24674 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24675
24676 D_ASSERT(atomic_read(&epoch->active) == 0);
24677 D_ASSERT(epoch->flags == 0);
24678 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24679 }
24680
24681 epoch->flags = 0;
24682 - atomic_set(&epoch->epoch_size, 0);
24683 + atomic_set_unchecked(&epoch->epoch_size, 0);
24684 atomic_set(&epoch->active, 0);
24685
24686 spin_lock(&mdev->epoch_lock);
24687 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
24688 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24689 list_add(&epoch->list, &mdev->current_epoch->list);
24690 mdev->current_epoch = epoch;
24691 mdev->epochs++;
24692 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24693 spin_unlock(&mdev->peer_seq_lock);
24694
24695 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24696 - atomic_inc(&mdev->current_epoch->epoch_size);
24697 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24698 return drbd_drain_block(mdev, data_size);
24699 }
24700
24701 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24702
24703 spin_lock(&mdev->epoch_lock);
24704 e->epoch = mdev->current_epoch;
24705 - atomic_inc(&e->epoch->epoch_size);
24706 + atomic_inc_unchecked(&e->epoch->epoch_size);
24707 atomic_inc(&e->epoch->active);
24708 spin_unlock(&mdev->epoch_lock);
24709
24710 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24711 D_ASSERT(list_empty(&mdev->done_ee));
24712
24713 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24714 - atomic_set(&mdev->current_epoch->epoch_size, 0);
24715 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24716 D_ASSERT(list_empty(&mdev->current_epoch->list));
24717 }
24718
24719 diff -urNp linux-3.0.7/drivers/block/loop.c linux-3.0.7/drivers/block/loop.c
24720 --- linux-3.0.7/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24721 +++ linux-3.0.7/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24722 @@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24723 mm_segment_t old_fs = get_fs();
24724
24725 set_fs(get_ds());
24726 - bw = file->f_op->write(file, buf, len, &pos);
24727 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24728 set_fs(old_fs);
24729 if (likely(bw == len))
24730 return 0;
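
The (const char __force_user *) cast works around sparse's address-space checking: f_op->write() is declared to take a __user pointer, but under set_fs(get_ds()) the loop driver deliberately passes a kernel buffer, and the cast marks that mismatch as intentional; presumably __force_user bundles sparse's __force and __user annotations, and the same reasoning covers the __force_kernel casts elsewhere in the patch. A sketch of how such annotations are typically wired up for sparse; demo_write() is an invented function and the macro bodies are a simplification of what the kernel defines.

/* Under sparse (__CHECKER__) pointers can carry an "address space" tag;
 * mixing user and kernel pointers without a __force cast is reported.
 * Under a normal compiler the annotations expand to nothing. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stddef.h>
#include <stdio.h>

/* Declared, like f_op->write(), to take a user-space pointer. */
static size_t demo_write(const char __user *buf, size_t len)
{
    (void)buf;              /* a real write would copy_from_user() here */
    return len;
}

int main(void)
{
    char kbuf[] = "kernel-side data";

    /* Passing a kernel buffer where a __user pointer is expected: the
     * __force cast documents that this is deliberate, which is what the
     * (__force_user) casts in the patch express in one macro. */
    size_t n = demo_write((const char __force __user *)kbuf, sizeof(kbuf));

    printf("wrote %zu bytes\n", n);
    return 0;
}
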
24731 diff -urNp linux-3.0.7/drivers/block/nbd.c linux-3.0.7/drivers/block/nbd.c
24732 --- linux-3.0.7/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24733 +++ linux-3.0.7/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24734 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24735 struct kvec iov;
24736 sigset_t blocked, oldset;
24737
24738 + pax_track_stack();
24739 +
24740 if (unlikely(!sock)) {
24741 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24742 lo->disk->disk_name, (send ? "send" : "recv"));
24743 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24744 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24745 unsigned int cmd, unsigned long arg)
24746 {
24747 + pax_track_stack();
24748 +
24749 switch (cmd) {
24750 case NBD_DISCONNECT: {
24751 struct request sreq;
24752 diff -urNp linux-3.0.7/drivers/char/agp/frontend.c linux-3.0.7/drivers/char/agp/frontend.c
24753 --- linux-3.0.7/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24754 +++ linux-3.0.7/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24755 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24756 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24757 return -EFAULT;
24758
24759 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24760 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24761 return -EFAULT;
24762
24763 client = agp_find_client_by_pid(reserve.pid);
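
The changed bound on reserve.seg_count is computed from sizeof(struct agp_segment_priv); judging from the hunk alone, that is the element type the count is later multiplied by, and bounding against the smaller user-visible struct would leave the multiplication room to overflow. A generic overflow-safe version of the same pattern; priv_elem and alloc_array_checked() are made-up names.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct priv_elem {          /* stand-in for the privately allocated element */
    uint64_t a, b, c;
};

/* Allocate count elements, rejecting counts that would overflow the
 * size calculation - the property the patched check is protecting. */
static void *alloc_array_checked(size_t count)
{
    if (count > SIZE_MAX / sizeof(struct priv_elem))
        return NULL;                 /* count * size would overflow */
    return malloc(count * sizeof(struct priv_elem));
}

int main(void)
{
    void *ok  = alloc_array_checked(16);
    void *bad = alloc_array_checked(SIZE_MAX / 2);

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}
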
24764 diff -urNp linux-3.0.7/drivers/char/briq_panel.c linux-3.0.7/drivers/char/briq_panel.c
24765 --- linux-3.0.7/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24766 +++ linux-3.0.7/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24767 @@ -9,6 +9,7 @@
24768 #include <linux/types.h>
24769 #include <linux/errno.h>
24770 #include <linux/tty.h>
24771 +#include <linux/mutex.h>
24772 #include <linux/timer.h>
24773 #include <linux/kernel.h>
24774 #include <linux/wait.h>
24775 @@ -34,6 +35,7 @@ static int vfd_is_open;
24776 static unsigned char vfd[40];
24777 static int vfd_cursor;
24778 static unsigned char ledpb, led;
24779 +static DEFINE_MUTEX(vfd_mutex);
24780
24781 static void update_vfd(void)
24782 {
24783 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24784 if (!vfd_is_open)
24785 return -EBUSY;
24786
24787 + mutex_lock(&vfd_mutex);
24788 for (;;) {
24789 char c;
24790 if (!indx)
24791 break;
24792 - if (get_user(c, buf))
24793 + if (get_user(c, buf)) {
24794 + mutex_unlock(&vfd_mutex);
24795 return -EFAULT;
24796 + }
24797 if (esc) {
24798 set_led(c);
24799 esc = 0;
24800 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24801 buf++;
24802 }
24803 update_vfd();
24804 + mutex_unlock(&vfd_mutex);
24805
24806 return len;
24807 }
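
The briq_panel hunk serializes writers with a new mutex because vfd[], vfd_cursor and the LED state are shared, and it is careful to drop the lock on the early -EFAULT return as well as on the normal path. A small pthread sketch of that lock/unlock-on-every-exit shape; panel_write() and the other names are invented.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

static pthread_mutex_t panel_lock = PTHREAD_MUTEX_INITIALIZER;
static char panel_buf[40];
static size_t panel_cursor;

/* Every exit path, including the error path, releases the lock,
 * mirroring the mutex_unlock() added before "return -EFAULT". */
static int panel_write(const char *data, size_t len)
{
    pthread_mutex_lock(&panel_lock);

    if (len > sizeof(panel_buf)) {
        pthread_mutex_unlock(&panel_lock);
        return -EINVAL;                      /* early error exit */
    }

    memcpy(panel_buf, data, len);
    panel_cursor = len;

    pthread_mutex_unlock(&panel_lock);       /* normal exit */
    return 0;
}

int main(void)
{
    printf("write: %d\n", panel_write("hello", 5));
    printf("oversized write: %d\n", panel_write("x", sizeof(panel_buf) + 1));
    return 0;
}
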
24808 diff -urNp linux-3.0.7/drivers/char/genrtc.c linux-3.0.7/drivers/char/genrtc.c
24809 --- linux-3.0.7/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24810 +++ linux-3.0.7/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24811 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24812 switch (cmd) {
24813
24814 case RTC_PLL_GET:
24815 + memset(&pll, 0, sizeof(pll));
24816 if (get_rtc_pll(&pll))
24817 return -EINVAL;
24818 else
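
The added memset() zeroes the pll structure before get_rtc_pll() fills it and it is copied back to userspace, so fields or padding that the lower layer never writes cannot leak stale kernel stack bytes. A tiny userspace model of the same precaution; pll_info and fill_partial() are hypothetical names.

#include <stdio.h>
#include <string.h>

struct pll_info {
    int  mult;
    char flags;        /* followed by padding the callee may never touch */
    long max;
};

/* Fills only some members, like a driver callback might. */
static void fill_partial(struct pll_info *p)
{
    p->mult = 12;
    p->max  = 1000;
    /* flags and the padding bytes are left alone */
}

int main(void)
{
    struct pll_info out;

    memset(&out, 0, sizeof(out));    /* the fix: no stale stack bytes remain */
    fill_partial(&out);

    /* In the kernel this buffer would now be copied to userspace;
     * every byte of it is either written by the callee or zero. */
    printf("mult=%d flags=%d max=%ld\n", out.mult, out.flags, out.max);
    return 0;
}
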
24819 diff -urNp linux-3.0.7/drivers/char/hpet.c linux-3.0.7/drivers/char/hpet.c
24820 --- linux-3.0.7/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24821 +++ linux-3.0.7/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24822 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24823 }
24824
24825 static int
24826 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24827 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24828 struct hpet_info *info)
24829 {
24830 struct hpet_timer __iomem *timer;
24831 diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c
24832 --- linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24833 +++ linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24834 @@ -415,7 +415,7 @@ struct ipmi_smi {
24835 struct proc_dir_entry *proc_dir;
24836 char proc_dir_name[10];
24837
24838 - atomic_t stats[IPMI_NUM_STATS];
24839 + atomic_unchecked_t stats[IPMI_NUM_STATS];
24840
24841 /*
24842 * run_to_completion duplicate of smb_info, smi_info
24843 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24844
24845
24846 #define ipmi_inc_stat(intf, stat) \
24847 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24848 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24849 #define ipmi_get_stat(intf, stat) \
24850 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24851 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24852
24853 static int is_lan_addr(struct ipmi_addr *addr)
24854 {
24855 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24856 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24857 init_waitqueue_head(&intf->waitq);
24858 for (i = 0; i < IPMI_NUM_STATS; i++)
24859 - atomic_set(&intf->stats[i], 0);
24860 + atomic_set_unchecked(&intf->stats[i], 0);
24861
24862 intf->proc_dir = NULL;
24863
24864 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24865 struct ipmi_smi_msg smi_msg;
24866 struct ipmi_recv_msg recv_msg;
24867
24868 + pax_track_stack();
24869 +
24870 si = (struct ipmi_system_interface_addr *) &addr;
24871 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24872 si->channel = IPMI_BMC_CHANNEL;
24873 diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c
24874 --- linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24875 +++ linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24876 @@ -277,7 +277,7 @@ struct smi_info {
24877 unsigned char slave_addr;
24878
24879 /* Counters and things for the proc filesystem. */
24880 - atomic_t stats[SI_NUM_STATS];
24881 + atomic_unchecked_t stats[SI_NUM_STATS];
24882
24883 struct task_struct *thread;
24884
24885 @@ -286,9 +286,9 @@ struct smi_info {
24886 };
24887
24888 #define smi_inc_stat(smi, stat) \
24889 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24890 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24891 #define smi_get_stat(smi, stat) \
24892 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24893 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24894
24895 #define SI_MAX_PARMS 4
24896
24897 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24898 atomic_set(&new_smi->req_events, 0);
24899 new_smi->run_to_completion = 0;
24900 for (i = 0; i < SI_NUM_STATS; i++)
24901 - atomic_set(&new_smi->stats[i], 0);
24902 + atomic_set_unchecked(&new_smi->stats[i], 0);
24903
24904 new_smi->interrupt_disabled = 1;
24905 atomic_set(&new_smi->stop_operation, 0);
24906 diff -urNp linux-3.0.7/drivers/char/Kconfig linux-3.0.7/drivers/char/Kconfig
24907 --- linux-3.0.7/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24908 +++ linux-3.0.7/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24909 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24910
24911 config DEVKMEM
24912 bool "/dev/kmem virtual device support"
24913 - default y
24914 + default n
24915 + depends on !GRKERNSEC_KMEM
24916 help
24917 Say Y here if you want to support the /dev/kmem device. The
24918 /dev/kmem device is rarely used, but can be used for certain
24919 @@ -596,6 +597,7 @@ config DEVPORT
24920 bool
24921 depends on !M68K
24922 depends on ISA || PCI
24923 + depends on !GRKERNSEC_KMEM
24924 default y
24925
24926 source "drivers/s390/char/Kconfig"
24927 diff -urNp linux-3.0.7/drivers/char/mbcs.c linux-3.0.7/drivers/char/mbcs.c
24928 --- linux-3.0.7/drivers/char/mbcs.c 2011-07-21 22:17:23.000000000 -0400
24929 +++ linux-3.0.7/drivers/char/mbcs.c 2011-10-11 10:44:33.000000000 -0400
24930 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
24931 return 0;
24932 }
24933
24934 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
24935 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
24936 {
24937 .part_num = MBCS_PART_NUM,
24938 .mfg_num = MBCS_MFG_NUM,
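
The __devinitdata to __devinitconst change here (and on the edac ID tables further down) matters because these arrays are already declared const: the two markers select different init-time sections, and the apparent intent is to keep const tables in a read-only section rather than a writable one. A userspace stand-in showing only that a section attribute composes with const; the macro and section name below are invented and the real kernel macros differ.

#include <stdio.h>

/* Stand-in for __devinitconst: a section attribute combined with const,
 * so the table can be mapped read-only. The section name is made up. */
#define demo_initconst __attribute__((section(".demo.init.rodata")))

struct demo_id {
    unsigned vendor, device;
};

static const struct demo_id demo_id_table[] demo_initconst = {
    { 0x8086, 0x1234 },
    { 0, 0 }
};

int main(void)
{
    printf("first entry: %04x:%04x\n",
           demo_id_table[0].vendor, demo_id_table[0].device);
    return 0;
}
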
24939 diff -urNp linux-3.0.7/drivers/char/mem.c linux-3.0.7/drivers/char/mem.c
24940 --- linux-3.0.7/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24941 +++ linux-3.0.7/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24942 @@ -18,6 +18,7 @@
24943 #include <linux/raw.h>
24944 #include <linux/tty.h>
24945 #include <linux/capability.h>
24946 +#include <linux/security.h>
24947 #include <linux/ptrace.h>
24948 #include <linux/device.h>
24949 #include <linux/highmem.h>
24950 @@ -34,6 +35,10 @@
24951 # include <linux/efi.h>
24952 #endif
24953
24954 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24955 +extern struct file_operations grsec_fops;
24956 +#endif
24957 +
24958 static inline unsigned long size_inside_page(unsigned long start,
24959 unsigned long size)
24960 {
24961 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24962
24963 while (cursor < to) {
24964 if (!devmem_is_allowed(pfn)) {
24965 +#ifdef CONFIG_GRKERNSEC_KMEM
24966 + gr_handle_mem_readwrite(from, to);
24967 +#else
24968 printk(KERN_INFO
24969 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24970 current->comm, from, to);
24971 +#endif
24972 return 0;
24973 }
24974 cursor += PAGE_SIZE;
24975 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24976 }
24977 return 1;
24978 }
24979 +#elif defined(CONFIG_GRKERNSEC_KMEM)
24980 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24981 +{
24982 + return 0;
24983 +}
24984 #else
24985 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24986 {
24987 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24988
24989 while (count > 0) {
24990 unsigned long remaining;
24991 + char *temp;
24992
24993 sz = size_inside_page(p, count);
24994
24995 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24996 if (!ptr)
24997 return -EFAULT;
24998
24999 - remaining = copy_to_user(buf, ptr, sz);
25000 +#ifdef CONFIG_PAX_USERCOPY
25001 + temp = kmalloc(sz, GFP_KERNEL);
25002 + if (!temp) {
25003 + unxlate_dev_mem_ptr(p, ptr);
25004 + return -ENOMEM;
25005 + }
25006 + memcpy(temp, ptr, sz);
25007 +#else
25008 + temp = ptr;
25009 +#endif
25010 +
25011 + remaining = copy_to_user(buf, temp, sz);
25012 +
25013 +#ifdef CONFIG_PAX_USERCOPY
25014 + kfree(temp);
25015 +#endif
25016 +
25017 unxlate_dev_mem_ptr(p, ptr);
25018 if (remaining)
25019 return -EFAULT;
25020 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25021 size_t count, loff_t *ppos)
25022 {
25023 unsigned long p = *ppos;
25024 - ssize_t low_count, read, sz;
25025 + ssize_t low_count, read, sz, err = 0;
25026 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25027 - int err = 0;
25028
25029 read = 0;
25030 if (p < (unsigned long) high_memory) {
25031 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25032 }
25033 #endif
25034 while (low_count > 0) {
25035 + char *temp;
25036 +
25037 sz = size_inside_page(p, low_count);
25038
25039 /*
25040 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25041 */
25042 kbuf = xlate_dev_kmem_ptr((char *)p);
25043
25044 - if (copy_to_user(buf, kbuf, sz))
25045 +#ifdef CONFIG_PAX_USERCOPY
25046 + temp = kmalloc(sz, GFP_KERNEL);
25047 + if (!temp)
25048 + return -ENOMEM;
25049 + memcpy(temp, kbuf, sz);
25050 +#else
25051 + temp = kbuf;
25052 +#endif
25053 +
25054 + err = copy_to_user(buf, temp, sz);
25055 +
25056 +#ifdef CONFIG_PAX_USERCOPY
25057 + kfree(temp);
25058 +#endif
25059 +
25060 + if (err)
25061 return -EFAULT;
25062 buf += sz;
25063 p += sz;
25064 @@ -866,6 +913,9 @@ static const struct memdev {
25065 #ifdef CONFIG_CRASH_DUMP
25066 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25067 #endif
25068 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25069 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25070 +#endif
25071 };
25072
25073 static int memory_open(struct inode *inode, struct file *filp)
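
Under CONFIG_PAX_USERCOPY the /dev/mem and /dev/kmem read paths above stage the data through a freshly kmalloc'ed bounce buffer instead of handing the translated mapping straight to copy_to_user(); the likely reason is that the usercopy checks validate copies against a known heap object, which an arbitrary xlate'd pointer cannot satisfy. A userspace sketch of the bounce-buffer shape only; demo_copy_to_user() and bounce_copy() are invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied. */
static size_t demo_copy_to_user(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

/* Copy from an arbitrary source through a heap bounce buffer, the shape
 * the CONFIG_PAX_USERCOPY branch adds around copy_to_user(). */
static int bounce_copy(void *user_dst, const void *raw_src, size_t n)
{
    void *tmp = malloc(n);          /* kmalloc() in the kernel version */
    size_t not_copied;

    if (!tmp)
        return -1;                  /* -ENOMEM */

    memcpy(tmp, raw_src, n);        /* stage into a well-defined object */
    not_copied = demo_copy_to_user(user_dst, tmp, n);
    free(tmp);                      /* kfree() */

    return not_copied ? -1 : 0;     /* -EFAULT on a short copy */
}

int main(void)
{
    char src[] = "some device memory";
    char dst[sizeof(src)];

    if (bounce_copy(dst, src, sizeof(src)) == 0)
        printf("copied: %s\n", dst);
    return 0;
}
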
25074 diff -urNp linux-3.0.7/drivers/char/nvram.c linux-3.0.7/drivers/char/nvram.c
25075 --- linux-3.0.7/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25076 +++ linux-3.0.7/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25077 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25078
25079 spin_unlock_irq(&rtc_lock);
25080
25081 - if (copy_to_user(buf, contents, tmp - contents))
25082 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25083 return -EFAULT;
25084
25085 *ppos = i;
25086 diff -urNp linux-3.0.7/drivers/char/random.c linux-3.0.7/drivers/char/random.c
25087 --- linux-3.0.7/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
25088 +++ linux-3.0.7/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25089 @@ -261,8 +261,13 @@
25090 /*
25091 * Configuration information
25092 */
25093 +#ifdef CONFIG_GRKERNSEC_RANDNET
25094 +#define INPUT_POOL_WORDS 512
25095 +#define OUTPUT_POOL_WORDS 128
25096 +#else
25097 #define INPUT_POOL_WORDS 128
25098 #define OUTPUT_POOL_WORDS 32
25099 +#endif
25100 #define SEC_XFER_SIZE 512
25101 #define EXTRACT_SIZE 10
25102
25103 @@ -300,10 +305,17 @@ static struct poolinfo {
25104 int poolwords;
25105 int tap1, tap2, tap3, tap4, tap5;
25106 } poolinfo_table[] = {
25107 +#ifdef CONFIG_GRKERNSEC_RANDNET
25108 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25109 + { 512, 411, 308, 208, 104, 1 },
25110 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25111 + { 128, 103, 76, 51, 25, 1 },
25112 +#else
25113 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25114 { 128, 103, 76, 51, 25, 1 },
25115 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25116 { 32, 26, 20, 14, 7, 1 },
25117 +#endif
25118 #if 0
25119 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25120 { 2048, 1638, 1231, 819, 411, 1 },
25121 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25122
25123 extract_buf(r, tmp);
25124 i = min_t(int, nbytes, EXTRACT_SIZE);
25125 - if (copy_to_user(buf, tmp, i)) {
25126 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25127 ret = -EFAULT;
25128 break;
25129 }
25130 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25131 #include <linux/sysctl.h>
25132
25133 static int min_read_thresh = 8, min_write_thresh;
25134 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
25135 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25136 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25137 static char sysctl_bootid[16];
25138
25139 diff -urNp linux-3.0.7/drivers/char/sonypi.c linux-3.0.7/drivers/char/sonypi.c
25140 --- linux-3.0.7/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25141 +++ linux-3.0.7/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25142 @@ -55,6 +55,7 @@
25143 #include <asm/uaccess.h>
25144 #include <asm/io.h>
25145 #include <asm/system.h>
25146 +#include <asm/local.h>
25147
25148 #include <linux/sonypi.h>
25149
25150 @@ -491,7 +492,7 @@ static struct sonypi_device {
25151 spinlock_t fifo_lock;
25152 wait_queue_head_t fifo_proc_list;
25153 struct fasync_struct *fifo_async;
25154 - int open_count;
25155 + local_t open_count;
25156 int model;
25157 struct input_dev *input_jog_dev;
25158 struct input_dev *input_key_dev;
25159 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25160 static int sonypi_misc_release(struct inode *inode, struct file *file)
25161 {
25162 mutex_lock(&sonypi_device.lock);
25163 - sonypi_device.open_count--;
25164 + local_dec(&sonypi_device.open_count);
25165 mutex_unlock(&sonypi_device.lock);
25166 return 0;
25167 }
25168 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25169 {
25170 mutex_lock(&sonypi_device.lock);
25171 /* Flush input queue on first open */
25172 - if (!sonypi_device.open_count)
25173 + if (!local_read(&sonypi_device.open_count))
25174 kfifo_reset(&sonypi_device.fifo);
25175 - sonypi_device.open_count++;
25176 + local_inc(&sonypi_device.open_count);
25177 mutex_unlock(&sonypi_device.lock);
25178
25179 return 0;
25180 diff -urNp linux-3.0.7/drivers/char/tpm/tpm_bios.c linux-3.0.7/drivers/char/tpm/tpm_bios.c
25181 --- linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25182 +++ linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25183 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25184 event = addr;
25185
25186 if ((event->event_type == 0 && event->event_size == 0) ||
25187 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25188 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25189 return NULL;
25190
25191 return addr;
25192 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25193 return NULL;
25194
25195 if ((event->event_type == 0 && event->event_size == 0) ||
25196 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25197 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25198 return NULL;
25199
25200 (*pos)++;
25201 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25202 int i;
25203
25204 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25205 - seq_putc(m, data[i]);
25206 + if (!seq_putc(m, data[i]))
25207 + return -EFAULT;
25208
25209 return 0;
25210 }
25211 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25212 log->bios_event_log_end = log->bios_event_log + len;
25213
25214 virt = acpi_os_map_memory(start, len);
25215 + if (!virt) {
25216 + kfree(log->bios_event_log);
25217 + log->bios_event_log = NULL;
25218 + return -EFAULT;
25219 + }
25220
25221 - memcpy(log->bios_event_log, virt, len);
25222 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25223
25224 acpi_os_unmap_memory(virt, len);
25225 return 0;
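
Both tpm_bios hunks rewrite "(addr + sizeof(struct tcpa_event) + event->event_size) >= limit" as "event->event_size >= limit - addr - sizeof(struct tcpa_event)": with an untrusted event_size the original sum can wrap past limit and slip through, while the rewritten form compares the length against the room actually left. A small sketch of the two forms with plain integers; fits_unsafe() and fits_safe() are invented names.

#include <stdio.h>
#include <stdint.h>

/* Unsafe form: start + header + len can wrap around and compare "< limit". */
static int fits_unsafe(uintptr_t start, size_t header, size_t len, uintptr_t limit)
{
    return start + header + len < limit;
}

/* Safe form: compare the untrusted length against the remaining room,
 * mirroring the rewritten checks in tpm_bios_measurements_start/next(). */
static int fits_safe(uintptr_t start, size_t header, size_t len, uintptr_t limit)
{
    if (limit < start || limit - start < header)
        return 0;                   /* no room even for the header */
    return len < limit - start - header;
}

int main(void)
{
    uintptr_t start = 0x1000, limit = 0x2000;
    size_t header = 32;
    size_t huge = SIZE_MAX - 16;    /* wraps the naive sum */

    printf("unsafe says %d, safe says %d\n",
           fits_unsafe(start, header, huge, limit),
           fits_safe(start, header, huge, limit));
    return 0;
}
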
25226 diff -urNp linux-3.0.7/drivers/char/tpm/tpm.c linux-3.0.7/drivers/char/tpm/tpm.c
25227 --- linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:54:53.000000000 -0400
25228 +++ linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:55:27.000000000 -0400
25229 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25230 chip->vendor.req_complete_val)
25231 goto out_recv;
25232
25233 - if ((status == chip->vendor.req_canceled)) {
25234 + if (status == chip->vendor.req_canceled) {
25235 dev_err(chip->dev, "Operation Canceled\n");
25236 rc = -ECANCELED;
25237 goto out;
25238 @@ -847,6 +847,8 @@ ssize_t tpm_show_pubek(struct device *de
25239
25240 struct tpm_chip *chip = dev_get_drvdata(dev);
25241
25242 + pax_track_stack();
25243 +
25244 tpm_cmd.header.in = tpm_readpubek_header;
25245 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25246 "attempting to read the PUBEK");
25247 diff -urNp linux-3.0.7/drivers/char/virtio_console.c linux-3.0.7/drivers/char/virtio_console.c
25248 --- linux-3.0.7/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25249 +++ linux-3.0.7/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25250 @@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25251 if (to_user) {
25252 ssize_t ret;
25253
25254 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25255 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25256 if (ret)
25257 return -EFAULT;
25258 } else {
25259 @@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25260 if (!port_has_data(port) && !port->host_connected)
25261 return 0;
25262
25263 - return fill_readbuf(port, ubuf, count, true);
25264 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25265 }
25266
25267 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25268 diff -urNp linux-3.0.7/drivers/crypto/hifn_795x.c linux-3.0.7/drivers/crypto/hifn_795x.c
25269 --- linux-3.0.7/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25270 +++ linux-3.0.7/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25271 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25272 0xCA, 0x34, 0x2B, 0x2E};
25273 struct scatterlist sg;
25274
25275 + pax_track_stack();
25276 +
25277 memset(src, 0, sizeof(src));
25278 memset(ctx.key, 0, sizeof(ctx.key));
25279
25280 diff -urNp linux-3.0.7/drivers/crypto/padlock-aes.c linux-3.0.7/drivers/crypto/padlock-aes.c
25281 --- linux-3.0.7/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25282 +++ linux-3.0.7/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25283 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25284 struct crypto_aes_ctx gen_aes;
25285 int cpu;
25286
25287 + pax_track_stack();
25288 +
25289 if (key_len % 8) {
25290 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25291 return -EINVAL;
25292 diff -urNp linux-3.0.7/drivers/dma/ioat/dma_v3.c linux-3.0.7/drivers/dma/ioat/dma_v3.c
25293 --- linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-07-21 22:17:23.000000000 -0400
25294 +++ linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-10-11 10:44:33.000000000 -0400
25295 @@ -73,10 +73,10 @@
25296 /* provide a lookup table for setting the source address in the base or
25297 * extended descriptor of an xor or pq descriptor
25298 */
25299 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
25300 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
25301 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
25302 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
25303 +static const u8 xor_idx_to_desc = 0xd0;
25304 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
25305 +static const u8 pq_idx_to_desc = 0xf8;
25306 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
25307
25308 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
25309 {
25310 diff -urNp linux-3.0.7/drivers/edac/amd64_edac.c linux-3.0.7/drivers/edac/amd64_edac.c
25311 --- linux-3.0.7/drivers/edac/amd64_edac.c 2011-07-21 22:17:23.000000000 -0400
25312 +++ linux-3.0.7/drivers/edac/amd64_edac.c 2011-10-11 10:44:33.000000000 -0400
25313 @@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25314 * PCI core identifies what devices are on a system during boot, and then
25315 * inquiry this table to see if this driver is for a given device found.
25316 */
25317 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25318 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25319 {
25320 .vendor = PCI_VENDOR_ID_AMD,
25321 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25322 diff -urNp linux-3.0.7/drivers/edac/amd76x_edac.c linux-3.0.7/drivers/edac/amd76x_edac.c
25323 --- linux-3.0.7/drivers/edac/amd76x_edac.c 2011-07-21 22:17:23.000000000 -0400
25324 +++ linux-3.0.7/drivers/edac/amd76x_edac.c 2011-10-11 10:44:33.000000000 -0400
25325 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25326 edac_mc_free(mci);
25327 }
25328
25329 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25330 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25331 {
25332 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25333 AMD762},
25334 diff -urNp linux-3.0.7/drivers/edac/e752x_edac.c linux-3.0.7/drivers/edac/e752x_edac.c
25335 --- linux-3.0.7/drivers/edac/e752x_edac.c 2011-07-21 22:17:23.000000000 -0400
25336 +++ linux-3.0.7/drivers/edac/e752x_edac.c 2011-10-11 10:44:33.000000000 -0400
25337 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25338 edac_mc_free(mci);
25339 }
25340
25341 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25342 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25343 {
25344 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25345 E7520},
25346 diff -urNp linux-3.0.7/drivers/edac/e7xxx_edac.c linux-3.0.7/drivers/edac/e7xxx_edac.c
25347 --- linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-07-21 22:17:23.000000000 -0400
25348 +++ linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-10-11 10:44:33.000000000 -0400
25349 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25350 edac_mc_free(mci);
25351 }
25352
25353 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25354 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25355 {
25356 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25357 E7205},
25358 diff -urNp linux-3.0.7/drivers/edac/edac_pci_sysfs.c linux-3.0.7/drivers/edac/edac_pci_sysfs.c
25359 --- linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25360 +++ linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25361 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25362 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25363 static int edac_pci_poll_msec = 1000; /* one second workq period */
25364
25365 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
25366 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25367 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25368 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25369
25370 static struct kobject *edac_pci_top_main_kobj;
25371 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25372 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25373 edac_printk(KERN_CRIT, EDAC_PCI,
25374 "Signaled System Error on %s\n",
25375 pci_name(dev));
25376 - atomic_inc(&pci_nonparity_count);
25377 + atomic_inc_unchecked(&pci_nonparity_count);
25378 }
25379
25380 if (status & (PCI_STATUS_PARITY)) {
25381 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25382 "Master Data Parity Error on %s\n",
25383 pci_name(dev));
25384
25385 - atomic_inc(&pci_parity_count);
25386 + atomic_inc_unchecked(&pci_parity_count);
25387 }
25388
25389 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25390 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25391 "Detected Parity Error on %s\n",
25392 pci_name(dev));
25393
25394 - atomic_inc(&pci_parity_count);
25395 + atomic_inc_unchecked(&pci_parity_count);
25396 }
25397 }
25398
25399 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25400 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25401 "Signaled System Error on %s\n",
25402 pci_name(dev));
25403 - atomic_inc(&pci_nonparity_count);
25404 + atomic_inc_unchecked(&pci_nonparity_count);
25405 }
25406
25407 if (status & (PCI_STATUS_PARITY)) {
25408 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25409 "Master Data Parity Error on "
25410 "%s\n", pci_name(dev));
25411
25412 - atomic_inc(&pci_parity_count);
25413 + atomic_inc_unchecked(&pci_parity_count);
25414 }
25415
25416 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25417 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25418 "Detected Parity Error on %s\n",
25419 pci_name(dev));
25420
25421 - atomic_inc(&pci_parity_count);
25422 + atomic_inc_unchecked(&pci_parity_count);
25423 }
25424 }
25425 }
25426 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25427 if (!check_pci_errors)
25428 return;
25429
25430 - before_count = atomic_read(&pci_parity_count);
25431 + before_count = atomic_read_unchecked(&pci_parity_count);
25432
25433 /* scan all PCI devices looking for a Parity Error on devices and
25434 * bridges.
25435 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25436 /* Only if operator has selected panic on PCI Error */
25437 if (edac_pci_get_panic_on_pe()) {
25438 /* If the count is different 'after' from 'before' */
25439 - if (before_count != atomic_read(&pci_parity_count))
25440 + if (before_count != atomic_read_unchecked(&pci_parity_count))
25441 panic("EDAC: PCI Parity Error");
25442 }
25443 }
25444 diff -urNp linux-3.0.7/drivers/edac/i3000_edac.c linux-3.0.7/drivers/edac/i3000_edac.c
25445 --- linux-3.0.7/drivers/edac/i3000_edac.c 2011-07-21 22:17:23.000000000 -0400
25446 +++ linux-3.0.7/drivers/edac/i3000_edac.c 2011-10-11 10:44:33.000000000 -0400
25447 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25448 edac_mc_free(mci);
25449 }
25450
25451 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25452 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25453 {
25454 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25455 I3000},
25456 diff -urNp linux-3.0.7/drivers/edac/i3200_edac.c linux-3.0.7/drivers/edac/i3200_edac.c
25457 --- linux-3.0.7/drivers/edac/i3200_edac.c 2011-07-21 22:17:23.000000000 -0400
25458 +++ linux-3.0.7/drivers/edac/i3200_edac.c 2011-10-11 10:44:33.000000000 -0400
25459 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25460 edac_mc_free(mci);
25461 }
25462
25463 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25464 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25465 {
25466 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25467 I3200},
25468 diff -urNp linux-3.0.7/drivers/edac/i5000_edac.c linux-3.0.7/drivers/edac/i5000_edac.c
25469 --- linux-3.0.7/drivers/edac/i5000_edac.c 2011-07-21 22:17:23.000000000 -0400
25470 +++ linux-3.0.7/drivers/edac/i5000_edac.c 2011-10-11 10:44:33.000000000 -0400
25471 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25472 *
25473 * The "E500P" device is the first device supported.
25474 */
25475 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25476 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25477 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
25478 .driver_data = I5000P},
25479
25480 diff -urNp linux-3.0.7/drivers/edac/i5100_edac.c linux-3.0.7/drivers/edac/i5100_edac.c
25481 --- linux-3.0.7/drivers/edac/i5100_edac.c 2011-07-21 22:17:23.000000000 -0400
25482 +++ linux-3.0.7/drivers/edac/i5100_edac.c 2011-10-11 10:44:33.000000000 -0400
25483 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
25484 edac_mc_free(mci);
25485 }
25486
25487 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
25488 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
25489 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
25490 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
25491 { 0, }
25492 diff -urNp linux-3.0.7/drivers/edac/i5400_edac.c linux-3.0.7/drivers/edac/i5400_edac.c
25493 --- linux-3.0.7/drivers/edac/i5400_edac.c 2011-07-21 22:17:23.000000000 -0400
25494 +++ linux-3.0.7/drivers/edac/i5400_edac.c 2011-10-11 10:44:33.000000000 -0400
25495 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
25496 *
25497 * The "E500P" device is the first device supported.
25498 */
25499 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
25500 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
25501 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
25502 {0,} /* 0 terminated list. */
25503 };
25504 diff -urNp linux-3.0.7/drivers/edac/i7300_edac.c linux-3.0.7/drivers/edac/i7300_edac.c
25505 --- linux-3.0.7/drivers/edac/i7300_edac.c 2011-07-21 22:17:23.000000000 -0400
25506 +++ linux-3.0.7/drivers/edac/i7300_edac.c 2011-10-11 10:44:33.000000000 -0400
25507 @@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
25508 *
25509 * Has only 8086:360c PCI ID
25510 */
25511 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
25512 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
25513 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
25514 {0,} /* 0 terminated list. */
25515 };
25516 diff -urNp linux-3.0.7/drivers/edac/i7core_edac.c linux-3.0.7/drivers/edac/i7core_edac.c
25517 --- linux-3.0.7/drivers/edac/i7core_edac.c 2011-09-02 18:11:26.000000000 -0400
25518 +++ linux-3.0.7/drivers/edac/i7core_edac.c 2011-10-11 10:44:33.000000000 -0400
25519 @@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
25520 /*
25521 * pci_device_id table for which devices we are looking for
25522 */
25523 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
25524 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
25525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
25526 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
25527 {0,} /* 0 terminated list. */
25528 diff -urNp linux-3.0.7/drivers/edac/i82443bxgx_edac.c linux-3.0.7/drivers/edac/i82443bxgx_edac.c
25529 --- linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-07-21 22:17:23.000000000 -0400
25530 +++ linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-10-11 10:44:33.000000000 -0400
25531 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
25532
25533 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
25534
25535 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
25536 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
25537 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
25538 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
25539 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
25540 diff -urNp linux-3.0.7/drivers/edac/i82860_edac.c linux-3.0.7/drivers/edac/i82860_edac.c
25541 --- linux-3.0.7/drivers/edac/i82860_edac.c 2011-07-21 22:17:23.000000000 -0400
25542 +++ linux-3.0.7/drivers/edac/i82860_edac.c 2011-10-11 10:44:33.000000000 -0400
25543 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
25544 edac_mc_free(mci);
25545 }
25546
25547 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
25548 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
25549 {
25550 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25551 I82860},
25552 diff -urNp linux-3.0.7/drivers/edac/i82875p_edac.c linux-3.0.7/drivers/edac/i82875p_edac.c
25553 --- linux-3.0.7/drivers/edac/i82875p_edac.c 2011-07-21 22:17:23.000000000 -0400
25554 +++ linux-3.0.7/drivers/edac/i82875p_edac.c 2011-10-11 10:44:33.000000000 -0400
25555 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
25556 edac_mc_free(mci);
25557 }
25558
25559 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
25560 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
25561 {
25562 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25563 I82875P},
25564 diff -urNp linux-3.0.7/drivers/edac/i82975x_edac.c linux-3.0.7/drivers/edac/i82975x_edac.c
25565 --- linux-3.0.7/drivers/edac/i82975x_edac.c 2011-07-21 22:17:23.000000000 -0400
25566 +++ linux-3.0.7/drivers/edac/i82975x_edac.c 2011-10-11 10:44:33.000000000 -0400
25567 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
25568 edac_mc_free(mci);
25569 }
25570
25571 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
25572 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
25573 {
25574 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25575 I82975X
25576 diff -urNp linux-3.0.7/drivers/edac/mce_amd.h linux-3.0.7/drivers/edac/mce_amd.h
25577 --- linux-3.0.7/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25578 +++ linux-3.0.7/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25579 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
25580 bool (*dc_mce)(u16, u8);
25581 bool (*ic_mce)(u16, u8);
25582 bool (*nb_mce)(u16, u8);
25583 -};
25584 +} __no_const;
25585
25586 void amd_report_gart_errors(bool);
25587 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25588 diff -urNp linux-3.0.7/drivers/edac/r82600_edac.c linux-3.0.7/drivers/edac/r82600_edac.c
25589 --- linux-3.0.7/drivers/edac/r82600_edac.c 2011-07-21 22:17:23.000000000 -0400
25590 +++ linux-3.0.7/drivers/edac/r82600_edac.c 2011-10-11 10:44:33.000000000 -0400
25591 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
25592 edac_mc_free(mci);
25593 }
25594
25595 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
25596 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
25597 {
25598 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
25599 },
25600 diff -urNp linux-3.0.7/drivers/edac/x38_edac.c linux-3.0.7/drivers/edac/x38_edac.c
25601 --- linux-3.0.7/drivers/edac/x38_edac.c 2011-07-21 22:17:23.000000000 -0400
25602 +++ linux-3.0.7/drivers/edac/x38_edac.c 2011-10-11 10:44:33.000000000 -0400
25603 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
25604 edac_mc_free(mci);
25605 }
25606
25607 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
25608 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
25609 {
25610 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25611 X38},
25612 diff -urNp linux-3.0.7/drivers/firewire/core-card.c linux-3.0.7/drivers/firewire/core-card.c
25613 --- linux-3.0.7/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25614 +++ linux-3.0.7/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25615 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25616
25617 void fw_core_remove_card(struct fw_card *card)
25618 {
25619 - struct fw_card_driver dummy_driver = dummy_driver_template;
25620 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
25621
25622 card->driver->update_phy_reg(card, 4,
25623 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25624 diff -urNp linux-3.0.7/drivers/firewire/core-cdev.c linux-3.0.7/drivers/firewire/core-cdev.c
25625 --- linux-3.0.7/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25626 +++ linux-3.0.7/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25627 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25628 int ret;
25629
25630 if ((request->channels == 0 && request->bandwidth == 0) ||
25631 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25632 - request->bandwidth < 0)
25633 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25634 return -EINVAL;
25635
25636 r = kmalloc(sizeof(*r), GFP_KERNEL);
25637 diff -urNp linux-3.0.7/drivers/firewire/core.h linux-3.0.7/drivers/firewire/core.h
25638 --- linux-3.0.7/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25639 +++ linux-3.0.7/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25640 @@ -101,6 +101,7 @@ struct fw_card_driver {
25641
25642 int (*stop_iso)(struct fw_iso_context *ctx);
25643 };
25644 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25645
25646 void fw_card_initialize(struct fw_card *card,
25647 const struct fw_card_driver *driver, struct device *device);
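
The fw_card_driver_no_const typedef pairs with the core-card.c hunk further up: when function-pointer-only structures are made const (the __no_const marker apparently being the opt-out for that treatment), the one spot that genuinely needs a writable instance, the on-stack dummy_driver, gets an explicitly non-const alias. A compact illustration of a const ops table coexisting with such an alias; card_driver and the demo functions are invented.

#include <stdio.h>

struct card_driver {
    void (*enable)(void);
    void (*disable)(void);
};

/* Alias used only where a writable instance is genuinely needed,
 * analogous to the fw_card_driver_no_const typedef. */
typedef struct card_driver card_driver_no_const;

static void real_enable(void)   { puts("enable");  }
static void real_disable(void)  { puts("disable"); }
static void dummy_disable(void) { puts("dummy disable"); }

/* The normal table stays const and read-only. */
static const struct card_driver real_driver = {
    .enable  = real_enable,
    .disable = real_disable,
};

int main(void)
{
    /* On-stack copy that gets selectively overridden before use,
     * like dummy_driver in fw_core_remove_card(). */
    card_driver_no_const dummy = real_driver;

    dummy.disable = dummy_disable;
    dummy.enable();
    dummy.disable();
    return 0;
}
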
25648 diff -urNp linux-3.0.7/drivers/firewire/core-transaction.c linux-3.0.7/drivers/firewire/core-transaction.c
25649 --- linux-3.0.7/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25650 +++ linux-3.0.7/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25651 @@ -37,6 +37,7 @@
25652 #include <linux/timer.h>
25653 #include <linux/types.h>
25654 #include <linux/workqueue.h>
25655 +#include <linux/sched.h>
25656
25657 #include <asm/byteorder.h>
25658
25659 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25660 struct transaction_callback_data d;
25661 struct fw_transaction t;
25662
25663 + pax_track_stack();
25664 +
25665 init_timer_on_stack(&t.split_timeout_timer);
25666 init_completion(&d.done);
25667 d.payload = payload;
25668 diff -urNp linux-3.0.7/drivers/firmware/dmi_scan.c linux-3.0.7/drivers/firmware/dmi_scan.c
25669 --- linux-3.0.7/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25670 +++ linux-3.0.7/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25671 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25672 }
25673 }
25674 else {
25675 - /*
25676 - * no iounmap() for that ioremap(); it would be a no-op, but
25677 - * it's so early in setup that sucker gets confused into doing
25678 - * what it shouldn't if we actually call it.
25679 - */
25680 p = dmi_ioremap(0xF0000, 0x10000);
25681 if (p == NULL)
25682 goto error;
25683 @@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25684 if (buf == NULL)
25685 return -1;
25686
25687 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25688 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25689
25690 iounmap(buf);
25691 return 0;
25692 diff -urNp linux-3.0.7/drivers/gpio/vr41xx_giu.c linux-3.0.7/drivers/gpio/vr41xx_giu.c
25693 --- linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25694 +++ linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25695 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25696 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25697 maskl, pendl, maskh, pendh);
25698
25699 - atomic_inc(&irq_err_count);
25700 + atomic_inc_unchecked(&irq_err_count);
25701
25702 return -EINVAL;
25703 }
25704 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc.c linux-3.0.7/drivers/gpu/drm/drm_crtc.c
25705 --- linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25706 +++ linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25707 @@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25708 */
25709 if ((out_resp->count_modes >= mode_count) && mode_count) {
25710 copied = 0;
25711 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25712 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25713 list_for_each_entry(mode, &connector->modes, head) {
25714 drm_crtc_convert_to_umode(&u_mode, mode);
25715 if (copy_to_user(mode_ptr + copied,
25716 @@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25717
25718 if ((out_resp->count_props >= props_count) && props_count) {
25719 copied = 0;
25720 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25721 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25722 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25723 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25724 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25725 if (connector->property_ids[i] != 0) {
25726 if (put_user(connector->property_ids[i],
25727 @@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25728
25729 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25730 copied = 0;
25731 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25732 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25733 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25734 if (connector->encoder_ids[i] != 0) {
25735 if (put_user(connector->encoder_ids[i],
25736 @@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25737 }
25738
25739 for (i = 0; i < crtc_req->count_connectors; i++) {
25740 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25741 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25742 if (get_user(out_id, &set_connectors_ptr[i])) {
25743 ret = -EFAULT;
25744 goto out;
25745 @@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25746 fb = obj_to_fb(obj);
25747
25748 num_clips = r->num_clips;
25749 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25750 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25751
25752 if (!num_clips != !clips_ptr) {
25753 ret = -EINVAL;
25754 @@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25755 out_resp->flags = property->flags;
25756
25757 if ((out_resp->count_values >= value_count) && value_count) {
25758 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25759 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25760 for (i = 0; i < value_count; i++) {
25761 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25762 ret = -EFAULT;
25763 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25764 if (property->flags & DRM_MODE_PROP_ENUM) {
25765 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25766 copied = 0;
25767 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25768 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25769 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25770
25771 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25772 @@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25773 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25774 copied = 0;
25775 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25776 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25777 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25778
25779 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25780 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25781 @@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25782 struct drm_mode_get_blob *out_resp = data;
25783 struct drm_property_blob *blob;
25784 int ret = 0;
25785 - void *blob_ptr;
25786 + void __user *blob_ptr;
25787
25788 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25789 return -EINVAL;
25790 @@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25791 blob = obj_to_blob(obj);
25792
25793 if (out_resp->length == blob->length) {
25794 - blob_ptr = (void *)(unsigned long)out_resp->data;
25795 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
25796 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25797 ret = -EFAULT;
25798 goto done;
25799 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c
25800 --- linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25801 +++ linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25802 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25803 struct drm_crtc *tmp;
25804 int crtc_mask = 1;
25805
25806 - WARN(!crtc, "checking null crtc?\n");
25807 + BUG_ON(!crtc);
25808
25809 dev = crtc->dev;
25810
25811 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25812 struct drm_encoder *encoder;
25813 bool ret = true;
25814
25815 + pax_track_stack();
25816 +
25817 crtc->enabled = drm_helper_crtc_in_use(crtc);
25818 if (!crtc->enabled)
25819 return true;
25820 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_drv.c linux-3.0.7/drivers/gpu/drm/drm_drv.c
25821 --- linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
25822 +++ linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
25823 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
25824 /**
25825 * Copy and IOCTL return string to user space
25826 */
25827 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
25828 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
25829 {
25830 int len;
25831
25832 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
25833
25834 dev = file_priv->minor->dev;
25835 atomic_inc(&dev->ioctl_count);
25836 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
25837 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
25838 ++file_priv->ioctl_count;
25839
25840 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
25841 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_fops.c linux-3.0.7/drivers/gpu/drm/drm_fops.c
25842 --- linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
25843 +++ linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
25844 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
25845 }
25846
25847 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
25848 - atomic_set(&dev->counts[i], 0);
25849 + atomic_set_unchecked(&dev->counts[i], 0);
25850
25851 dev->sigdata.lock = NULL;
25852
25853 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
25854
25855 retcode = drm_open_helper(inode, filp, dev);
25856 if (!retcode) {
25857 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
25858 - if (!dev->open_count++)
25859 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
25860 + if (local_inc_return(&dev->open_count) == 1)
25861 retcode = drm_setup(dev);
25862 }
25863 if (!retcode) {
25864 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
25865
25866 mutex_lock(&drm_global_mutex);
25867
25868 - DRM_DEBUG("open_count = %d\n", dev->open_count);
25869 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
25870
25871 if (dev->driver->preclose)
25872 dev->driver->preclose(dev, file_priv);
25873 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
25874 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
25875 task_pid_nr(current),
25876 (long)old_encode_dev(file_priv->minor->device),
25877 - dev->open_count);
25878 + local_read(&dev->open_count));
25879
25880 /* if the master has gone away we can't do anything with the lock */
25881 if (file_priv->minor->master)
25882 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
25883 * End inline drm_release
25884 */
25885
25886 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
25887 - if (!--dev->open_count) {
25888 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
25889 + if (local_dec_and_test(&dev->open_count)) {
25890 if (atomic_read(&dev->ioctl_count)) {
25891 DRM_ERROR("Device busy: %d\n",
25892 atomic_read(&dev->ioctl_count));
25893 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_global.c linux-3.0.7/drivers/gpu/drm/drm_global.c
25894 --- linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
25895 +++ linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
25896 @@ -36,7 +36,7 @@
25897 struct drm_global_item {
25898 struct mutex mutex;
25899 void *object;
25900 - int refcount;
25901 + atomic_t refcount;
25902 };
25903
25904 static struct drm_global_item glob[DRM_GLOBAL_NUM];
25905 @@ -49,7 +49,7 @@ void drm_global_init(void)
25906 struct drm_global_item *item = &glob[i];
25907 mutex_init(&item->mutex);
25908 item->object = NULL;
25909 - item->refcount = 0;
25910 + atomic_set(&item->refcount, 0);
25911 }
25912 }
25913
25914 @@ -59,7 +59,7 @@ void drm_global_release(void)
25915 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
25916 struct drm_global_item *item = &glob[i];
25917 BUG_ON(item->object != NULL);
25918 - BUG_ON(item->refcount != 0);
25919 + BUG_ON(atomic_read(&item->refcount) != 0);
25920 }
25921 }
25922
25923 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
25924 void *object;
25925
25926 mutex_lock(&item->mutex);
25927 - if (item->refcount == 0) {
25928 + if (atomic_read(&item->refcount) == 0) {
25929 item->object = kzalloc(ref->size, GFP_KERNEL);
25930 if (unlikely(item->object == NULL)) {
25931 ret = -ENOMEM;
25932 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
25933 goto out_err;
25934
25935 }
25936 - ++item->refcount;
25937 + atomic_inc(&item->refcount);
25938 ref->object = item->object;
25939 object = item->object;
25940 mutex_unlock(&item->mutex);
25941 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
25942 struct drm_global_item *item = &glob[ref->global_type];
25943
25944 mutex_lock(&item->mutex);
25945 - BUG_ON(item->refcount == 0);
25946 + BUG_ON(atomic_read(&item->refcount) == 0);
25947 BUG_ON(ref->object != item->object);
25948 - if (--item->refcount == 0) {
25949 + if (atomic_dec_and_test(&item->refcount)) {
25950 ref->release(ref);
25951 item->object = NULL;
25952 }
25953 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_info.c linux-3.0.7/drivers/gpu/drm/drm_info.c
25954 --- linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
25955 +++ linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
25956 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
25957 struct drm_local_map *map;
25958 struct drm_map_list *r_list;
25959
25960 - /* Hardcoded from _DRM_FRAME_BUFFER,
25961 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
25962 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
25963 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
25964 + static const char * const types[] = {
25965 + [_DRM_FRAME_BUFFER] = "FB",
25966 + [_DRM_REGISTERS] = "REG",
25967 + [_DRM_SHM] = "SHM",
25968 + [_DRM_AGP] = "AGP",
25969 + [_DRM_SCATTER_GATHER] = "SG",
25970 + [_DRM_CONSISTENT] = "PCI",
25971 + [_DRM_GEM] = "GEM" };
25972 const char *type;
25973 int i;
25974
25975 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
25976 map = r_list->map;
25977 if (!map)
25978 continue;
25979 - if (map->type < 0 || map->type > 5)
25980 + if (map->type >= ARRAY_SIZE(types))
25981 type = "??";
25982 else
25983 type = types[map->type];
25984 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
25985 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
25986 vma->vm_flags & VM_LOCKED ? 'l' : '-',
25987 vma->vm_flags & VM_IO ? 'i' : '-',
25988 +#ifdef CONFIG_GRKERNSEC_HIDESYM
25989 + 0);
25990 +#else
25991 vma->vm_pgoff);
25992 +#endif
25993
25994 #if defined(__i386__)
25995 pgprot = pgprot_val(vma->vm_page_prot);
25996 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioc32.c linux-3.0.7/drivers/gpu/drm/drm_ioc32.c
25997 --- linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25998 +++ linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
25999 @@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26000 request = compat_alloc_user_space(nbytes);
26001 if (!access_ok(VERIFY_WRITE, request, nbytes))
26002 return -EFAULT;
26003 - list = (struct drm_buf_desc *) (request + 1);
26004 + list = (struct drm_buf_desc __user *) (request + 1);
26005
26006 if (__put_user(count, &request->count)
26007 || __put_user(list, &request->list))
26008 @@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26009 request = compat_alloc_user_space(nbytes);
26010 if (!access_ok(VERIFY_WRITE, request, nbytes))
26011 return -EFAULT;
26012 - list = (struct drm_buf_pub *) (request + 1);
26013 + list = (struct drm_buf_pub __user *) (request + 1);
26014
26015 if (__put_user(count, &request->count)
26016 || __put_user(list, &request->list))
26017 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioctl.c linux-3.0.7/drivers/gpu/drm/drm_ioctl.c
26018 --- linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
26019 +++ linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
26020 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26021 stats->data[i].value =
26022 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26023 else
26024 - stats->data[i].value = atomic_read(&dev->counts[i]);
26025 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26026 stats->data[i].type = dev->types[i];
26027 }
26028
26029 diff -urNp linux-3.0.7/drivers/gpu/drm/drm_lock.c linux-3.0.7/drivers/gpu/drm/drm_lock.c
26030 --- linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
26031 +++ linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
26032 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26033 if (drm_lock_take(&master->lock, lock->context)) {
26034 master->lock.file_priv = file_priv;
26035 master->lock.lock_time = jiffies;
26036 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26037 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26038 break; /* Got lock */
26039 }
26040
26041 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26042 return -EINVAL;
26043 }
26044
26045 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26046 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26047
26048 if (drm_lock_free(&master->lock, lock->context)) {
26049 /* FIXME: Should really bail out here. */
26050 diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c
26051 --- linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
26052 +++ linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
26053 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26054 dma->buflist[vertex->idx],
26055 vertex->discard, vertex->used);
26056
26057 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26058 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26059 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26060 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26061 sarea_priv->last_enqueue = dev_priv->counter - 1;
26062 sarea_priv->last_dispatch = (int)hw_status[5];
26063
26064 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26065 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26066 mc->last_render);
26067
26068 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26069 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26070 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26071 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26072 sarea_priv->last_enqueue = dev_priv->counter - 1;
26073 sarea_priv->last_dispatch = (int)hw_status[5];
26074
26075 diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h
26076 --- linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
26077 +++ linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
26078 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26079 int page_flipping;
26080
26081 wait_queue_head_t irq_queue;
26082 - atomic_t irq_received;
26083 - atomic_t irq_emitted;
26084 + atomic_unchecked_t irq_received;
26085 + atomic_unchecked_t irq_emitted;
26086
26087 int front_offset;
26088 } drm_i810_private_t;
26089 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c
26090 --- linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
26091 +++ linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
26092 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26093 I915_READ(GTIMR));
26094 }
26095 seq_printf(m, "Interrupts received: %d\n",
26096 - atomic_read(&dev_priv->irq_received));
26097 + atomic_read_unchecked(&dev_priv->irq_received));
26098 for (i = 0; i < I915_NUM_RINGS; i++) {
26099 if (IS_GEN6(dev)) {
26100 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26101 @@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
26102 return ret;
26103
26104 if (opregion->header)
26105 - seq_write(m, opregion->header, OPREGION_SIZE);
26106 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26107
26108 mutex_unlock(&dev->struct_mutex);
26109
26110 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c
26111 --- linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
26112 +++ linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
26113 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
26114 bool can_switch;
26115
26116 spin_lock(&dev->count_lock);
26117 - can_switch = (dev->open_count == 0);
26118 + can_switch = (local_read(&dev->open_count) == 0);
26119 spin_unlock(&dev->count_lock);
26120 return can_switch;
26121 }
26122 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h
26123 --- linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
26124 +++ linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
26125 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
26126 /* render clock increase/decrease */
26127 /* display clock increase/decrease */
26128 /* pll clock increase/decrease */
26129 -};
26130 +} __no_const;
26131
26132 struct intel_device_info {
26133 u8 gen;
26134 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
26135 int current_page;
26136 int page_flipping;
26137
26138 - atomic_t irq_received;
26139 + atomic_unchecked_t irq_received;
26140
26141 /* protects the irq masks */
26142 spinlock_t irq_lock;
26143 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
26144 * will be page flipped away on the next vblank. When it
26145 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26146 */
26147 - atomic_t pending_flip;
26148 + atomic_unchecked_t pending_flip;
26149 };
26150
26151 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26152 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
26153 extern void intel_teardown_gmbus(struct drm_device *dev);
26154 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26155 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26156 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26157 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26158 {
26159 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26160 }
26161 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26162 --- linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
26163 +++ linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
26164 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26165 i915_gem_clflush_object(obj);
26166
26167 if (obj->base.pending_write_domain)
26168 - cd->flips |= atomic_read(&obj->pending_flip);
26169 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26170
26171 /* The actual obj->write_domain will be updated with
26172 * pending_write_domain after we emit the accumulated flush for all
26173 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c
26174 --- linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
26175 +++ linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
26176 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
26177 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26178 struct drm_i915_master_private *master_priv;
26179
26180 - atomic_inc(&dev_priv->irq_received);
26181 + atomic_inc_unchecked(&dev_priv->irq_received);
26182
26183 /* disable master interrupt before clearing iir */
26184 de_ier = I915_READ(DEIER);
26185 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
26186 struct drm_i915_master_private *master_priv;
26187 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26188
26189 - atomic_inc(&dev_priv->irq_received);
26190 + atomic_inc_unchecked(&dev_priv->irq_received);
26191
26192 if (IS_GEN6(dev))
26193 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26194 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
26195 int ret = IRQ_NONE, pipe;
26196 bool blc_event = false;
26197
26198 - atomic_inc(&dev_priv->irq_received);
26199 + atomic_inc_unchecked(&dev_priv->irq_received);
26200
26201 iir = I915_READ(IIR);
26202
26203 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
26204 {
26205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26206
26207 - atomic_set(&dev_priv->irq_received, 0);
26208 + atomic_set_unchecked(&dev_priv->irq_received, 0);
26209
26210 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26211 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26212 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
26213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26214 int pipe;
26215
26216 - atomic_set(&dev_priv->irq_received, 0);
26217 + atomic_set_unchecked(&dev_priv->irq_received, 0);
26218
26219 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26220 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26221 diff -urNp linux-3.0.7/drivers/gpu/drm/i915/intel_display.c linux-3.0.7/drivers/gpu/drm/i915/intel_display.c
26222 --- linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
26223 +++ linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
26224 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26225
26226 wait_event(dev_priv->pending_flip_queue,
26227 atomic_read(&dev_priv->mm.wedged) ||
26228 - atomic_read(&obj->pending_flip) == 0);
26229 + atomic_read_unchecked(&obj->pending_flip) == 0);
26230
26231 /* Big Hammer, we also need to ensure that any pending
26232 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26233 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
26234 obj = to_intel_framebuffer(crtc->fb)->obj;
26235 dev_priv = crtc->dev->dev_private;
26236 wait_event(dev_priv->pending_flip_queue,
26237 - atomic_read(&obj->pending_flip) == 0);
26238 + atomic_read_unchecked(&obj->pending_flip) == 0);
26239 }
26240
26241 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26242 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
26243
26244 atomic_clear_mask(1 << intel_crtc->plane,
26245 &obj->pending_flip.counter);
26246 - if (atomic_read(&obj->pending_flip) == 0)
26247 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
26248 wake_up(&dev_priv->pending_flip_queue);
26249
26250 schedule_work(&work->work);
26251 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
26252 /* Block clients from rendering to the new back buffer until
26253 * the flip occurs and the object is no longer visible.
26254 */
26255 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26256 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26257
26258 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26259 if (ret)
26260 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
26261 return 0;
26262
26263 cleanup_pending:
26264 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26265 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26266 cleanup_objs:
26267 drm_gem_object_unreference(&work->old_fb_obj->base);
26268 drm_gem_object_unreference(&obj->base);
26269 diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h
26270 --- linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
26271 +++ linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
26272 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26273 u32 clear_cmd;
26274 u32 maccess;
26275
26276 - atomic_t vbl_received; /**< Number of vblanks received. */
26277 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26278 wait_queue_head_t fence_queue;
26279 - atomic_t last_fence_retired;
26280 + atomic_unchecked_t last_fence_retired;
26281 u32 next_fence_to_post;
26282
26283 unsigned int fb_cpp;
26284 diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c
26285 --- linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26286 +++ linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26287 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26288 if (crtc != 0)
26289 return 0;
26290
26291 - return atomic_read(&dev_priv->vbl_received);
26292 + return atomic_read_unchecked(&dev_priv->vbl_received);
26293 }
26294
26295
26296 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26297 /* VBLANK interrupt */
26298 if (status & MGA_VLINEPEN) {
26299 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26300 - atomic_inc(&dev_priv->vbl_received);
26301 + atomic_inc_unchecked(&dev_priv->vbl_received);
26302 drm_handle_vblank(dev, 0);
26303 handled = 1;
26304 }
26305 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26306 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26307 MGA_WRITE(MGA_PRIMEND, prim_end);
26308
26309 - atomic_inc(&dev_priv->last_fence_retired);
26310 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
26311 DRM_WAKEUP(&dev_priv->fence_queue);
26312 handled = 1;
26313 }
26314 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26315 * using fences.
26316 */
26317 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26318 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26319 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26320 - *sequence) <= (1 << 23)));
26321
26322 *sequence = cur_fence;
26323 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c
26324 --- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26325 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26326 @@ -200,7 +200,7 @@ struct methods {
26327 const char desc[8];
26328 void (*loadbios)(struct drm_device *, uint8_t *);
26329 const bool rw;
26330 -};
26331 +} __do_const;
26332
26333 static struct methods shadow_methods[] = {
26334 { "PRAMIN", load_vbios_pramin, true },
26335 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26336 struct bit_table {
26337 const char id;
26338 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26339 -};
26340 +} __no_const;
26341
26342 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26343
26344 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h
26345 --- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26346 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26347 @@ -227,7 +227,7 @@ struct nouveau_channel {
26348 struct list_head pending;
26349 uint32_t sequence;
26350 uint32_t sequence_ack;
26351 - atomic_t last_sequence_irq;
26352 + atomic_unchecked_t last_sequence_irq;
26353 } fence;
26354
26355 /* DMA push buffer */
26356 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26357 u32 handle, u16 class);
26358 void (*set_tile_region)(struct drm_device *dev, int i);
26359 void (*tlb_flush)(struct drm_device *, int engine);
26360 -};
26361 +} __no_const;
26362
26363 struct nouveau_instmem_engine {
26364 void *priv;
26365 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26366 struct nouveau_mc_engine {
26367 int (*init)(struct drm_device *dev);
26368 void (*takedown)(struct drm_device *dev);
26369 -};
26370 +} __no_const;
26371
26372 struct nouveau_timer_engine {
26373 int (*init)(struct drm_device *dev);
26374 void (*takedown)(struct drm_device *dev);
26375 uint64_t (*read)(struct drm_device *dev);
26376 -};
26377 +} __no_const;
26378
26379 struct nouveau_fb_engine {
26380 int num_tiles;
26381 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26382 void (*put)(struct drm_device *, struct nouveau_mem **);
26383
26384 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26385 -};
26386 +} __no_const;
26387
26388 struct nouveau_engine {
26389 struct nouveau_instmem_engine instmem;
26390 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
26391 struct drm_global_reference mem_global_ref;
26392 struct ttm_bo_global_ref bo_global_ref;
26393 struct ttm_bo_device bdev;
26394 - atomic_t validate_sequence;
26395 + atomic_unchecked_t validate_sequence;
26396 } ttm;
26397
26398 struct {
26399 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c
26400 --- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26401 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26402 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26403 if (USE_REFCNT(dev))
26404 sequence = nvchan_rd32(chan, 0x48);
26405 else
26406 - sequence = atomic_read(&chan->fence.last_sequence_irq);
26407 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26408
26409 if (chan->fence.sequence_ack == sequence)
26410 goto out;
26411 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26412
26413 INIT_LIST_HEAD(&chan->fence.pending);
26414 spin_lock_init(&chan->fence.lock);
26415 - atomic_set(&chan->fence.last_sequence_irq, 0);
26416 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26417 return 0;
26418 }
26419
26420 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c
26421 --- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26422 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26423 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26424 int trycnt = 0;
26425 int ret, i;
26426
26427 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26428 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26429 retry:
26430 if (++trycnt > 100000) {
26431 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26432 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c
26433 --- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
26434 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
26435 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
26436 bool can_switch;
26437
26438 spin_lock(&dev->count_lock);
26439 - can_switch = (dev->open_count == 0);
26440 + can_switch = (local_read(&dev->open_count) == 0);
26441 spin_unlock(&dev->count_lock);
26442 return can_switch;
26443 }
26444 diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c
26445 --- linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
26446 +++ linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
26447 @@ -560,7 +560,7 @@ static int
26448 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26449 u32 class, u32 mthd, u32 data)
26450 {
26451 - atomic_set(&chan->fence.last_sequence_irq, data);
26452 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26453 return 0;
26454 }
26455
26456 diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c
26457 --- linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
26458 +++ linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
26459 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26460
26461 /* GH: Simple idle check.
26462 */
26463 - atomic_set(&dev_priv->idle_count, 0);
26464 + atomic_set_unchecked(&dev_priv->idle_count, 0);
26465
26466 /* We don't support anything other than bus-mastering ring mode,
26467 * but the ring can be in either AGP or PCI space for the ring
26468 diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h
26469 --- linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
26470 +++ linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
26471 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26472 int is_pci;
26473 unsigned long cce_buffers_offset;
26474
26475 - atomic_t idle_count;
26476 + atomic_unchecked_t idle_count;
26477
26478 int page_flipping;
26479 int current_page;
26480 u32 crtc_offset;
26481 u32 crtc_offset_cntl;
26482
26483 - atomic_t vbl_received;
26484 + atomic_unchecked_t vbl_received;
26485
26486 u32 color_fmt;
26487 unsigned int front_offset;
26488 diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c
26489 --- linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
26490 +++ linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
26491 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
26492 if (crtc != 0)
26493 return 0;
26494
26495 - return atomic_read(&dev_priv->vbl_received);
26496 + return atomic_read_unchecked(&dev_priv->vbl_received);
26497 }
26498
26499 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
26500 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
26501 /* VBLANK interrupt */
26502 if (status & R128_CRTC_VBLANK_INT) {
26503 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
26504 - atomic_inc(&dev_priv->vbl_received);
26505 + atomic_inc_unchecked(&dev_priv->vbl_received);
26506 drm_handle_vblank(dev, 0);
26507 return IRQ_HANDLED;
26508 }
26509 diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_state.c linux-3.0.7/drivers/gpu/drm/r128/r128_state.c
26510 --- linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
26511 +++ linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
26512 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
26513
26514 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
26515 {
26516 - if (atomic_read(&dev_priv->idle_count) == 0)
26517 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
26518 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
26519 else
26520 - atomic_set(&dev_priv->idle_count, 0);
26521 + atomic_set_unchecked(&dev_priv->idle_count, 0);
26522 }
26523
26524 #endif
26525 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/atom.c linux-3.0.7/drivers/gpu/drm/radeon/atom.c
26526 --- linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
26527 +++ linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
26528 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26529 char name[512];
26530 int i;
26531
26532 + pax_track_stack();
26533 +
26534 ctx->card = card;
26535 ctx->bios = bios;
26536
26537 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c
26538 --- linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26539 +++ linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26540 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26541 regex_t mask_rex;
26542 regmatch_t match[4];
26543 char buf[1024];
26544 - size_t end;
26545 + long end;
26546 int len;
26547 int done = 0;
26548 int r;
26549 unsigned o;
26550 struct offset *offset;
26551 char last_reg_s[10];
26552 - int last_reg;
26553 + unsigned long last_reg;
26554
26555 if (regcomp
26556 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26557 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c
26558 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26559 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26560 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26561 struct radeon_gpio_rec gpio;
26562 struct radeon_hpd hpd;
26563
26564 + pax_track_stack();
26565 +
26566 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26567 return false;
26568
26569 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c
26570 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26571 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26572 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26573 bool can_switch;
26574
26575 spin_lock(&dev->count_lock);
26576 - can_switch = (dev->open_count == 0);
26577 + can_switch = (local_read(&dev->open_count) == 0);
26578 spin_unlock(&dev->count_lock);
26579 return can_switch;
26580 }
26581 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c
26582 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26583 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26584 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26585 uint32_t post_div;
26586 u32 pll_out_min, pll_out_max;
26587
26588 + pax_track_stack();
26589 +
26590 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26591 freq = freq * 1000;
26592
26593 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h
26594 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26595 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26596 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26597
26598 /* SW interrupt */
26599 wait_queue_head_t swi_queue;
26600 - atomic_t swi_emitted;
26601 + atomic_unchecked_t swi_emitted;
26602 int vblank_crtc;
26603 uint32_t irq_enable_reg;
26604 uint32_t r500_disp_irq_reg;
26605 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c
26606 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26607 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26608 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26609 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26610 return 0;
26611 }
26612 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26613 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26614 if (!rdev->cp.ready)
26615 /* FIXME: cp is not running assume everythings is done right
26616 * away
26617 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26618 return r;
26619 }
26620 radeon_fence_write(rdev, 0);
26621 - atomic_set(&rdev->fence_drv.seq, 0);
26622 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26623 INIT_LIST_HEAD(&rdev->fence_drv.created);
26624 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26625 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26626 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon.h linux-3.0.7/drivers/gpu/drm/radeon/radeon.h
26627 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:54:53.000000000 -0400
26628 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:55:27.000000000 -0400
26629 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26630 */
26631 struct radeon_fence_driver {
26632 uint32_t scratch_reg;
26633 - atomic_t seq;
26634 + atomic_unchecked_t seq;
26635 uint32_t last_seq;
26636 unsigned long last_jiffies;
26637 unsigned long last_timeout;
26638 @@ -961,7 +961,7 @@ struct radeon_asic {
26639 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26640 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26641 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26642 -};
26643 +} __no_const;
26644
26645 /*
26646 * Asic structures
26647 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c
26648 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26649 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26650 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26651 request = compat_alloc_user_space(sizeof(*request));
26652 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26653 || __put_user(req32.param, &request->param)
26654 - || __put_user((void __user *)(unsigned long)req32.value,
26655 + || __put_user((unsigned long)req32.value,
26656 &request->value))
26657 return -EFAULT;
26658
26659 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c
26660 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26661 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26662 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26663 unsigned int ret;
26664 RING_LOCALS;
26665
26666 - atomic_inc(&dev_priv->swi_emitted);
26667 - ret = atomic_read(&dev_priv->swi_emitted);
26668 + atomic_inc_unchecked(&dev_priv->swi_emitted);
26669 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26670
26671 BEGIN_RING(4);
26672 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26673 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26674 drm_radeon_private_t *dev_priv =
26675 (drm_radeon_private_t *) dev->dev_private;
26676
26677 - atomic_set(&dev_priv->swi_emitted, 0);
26678 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26679 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26680
26681 dev->max_vblank_count = 0x001fffff;
26682 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c
26683 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26684 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26685 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26686 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26687 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26688
26689 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26690 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26691 sarea_priv->nbox * sizeof(depth_boxes[0])))
26692 return -EFAULT;
26693
26694 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26695 {
26696 drm_radeon_private_t *dev_priv = dev->dev_private;
26697 drm_radeon_getparam_t *param = data;
26698 - int value;
26699 + int value = 0;
26700
26701 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26702
26703 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c
26704 --- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:54:53.000000000 -0400
26705 +++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:55:27.000000000 -0400
26706 @@ -649,8 +649,10 @@ int radeon_mmap(struct file *filp, struc
26707 }
26708 if (unlikely(ttm_vm_ops == NULL)) {
26709 ttm_vm_ops = vma->vm_ops;
26710 - radeon_ttm_vm_ops = *ttm_vm_ops;
26711 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26712 + pax_open_kernel();
26713 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26714 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26715 + pax_close_kernel();
26716 }
26717 vma->vm_ops = &radeon_ttm_vm_ops;
26718 return 0;
26719 diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/rs690.c linux-3.0.7/drivers/gpu/drm/radeon/rs690.c
26720 --- linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26721 +++ linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26722 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26723 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26724 rdev->pm.sideport_bandwidth.full)
26725 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26726 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26727 + read_delay_latency.full = dfixed_const(800 * 1000);
26728 read_delay_latency.full = dfixed_div(read_delay_latency,
26729 rdev->pm.igp_sideport_mclk);
26730 + a.full = dfixed_const(370);
26731 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26732 } else {
26733 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26734 rdev->pm.k8_bandwidth.full)
26735 diff -urNp linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c
26736 --- linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26737 +++ linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26738 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26739 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26740 struct shrink_control *sc)
26741 {
26742 - static atomic_t start_pool = ATOMIC_INIT(0);
26743 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26744 unsigned i;
26745 - unsigned pool_offset = atomic_add_return(1, &start_pool);
26746 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26747 struct ttm_page_pool *pool;
26748 int shrink_pages = sc->nr_to_scan;
26749
26750 diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_drv.h linux-3.0.7/drivers/gpu/drm/via/via_drv.h
26751 --- linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26752 +++ linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26753 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26754 typedef uint32_t maskarray_t[5];
26755
26756 typedef struct drm_via_irq {
26757 - atomic_t irq_received;
26758 + atomic_unchecked_t irq_received;
26759 uint32_t pending_mask;
26760 uint32_t enable_mask;
26761 wait_queue_head_t irq_queue;
26762 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
26763 struct timeval last_vblank;
26764 int last_vblank_valid;
26765 unsigned usec_per_vblank;
26766 - atomic_t vbl_received;
26767 + atomic_unchecked_t vbl_received;
26768 drm_via_state_t hc_state;
26769 char pci_buf[VIA_PCI_BUF_SIZE];
26770 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26771 diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_irq.c linux-3.0.7/drivers/gpu/drm/via/via_irq.c
26772 --- linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26773 +++ linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26774 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26775 if (crtc != 0)
26776 return 0;
26777
26778 - return atomic_read(&dev_priv->vbl_received);
26779 + return atomic_read_unchecked(&dev_priv->vbl_received);
26780 }
26781
26782 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26783 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26784
26785 status = VIA_READ(VIA_REG_INTERRUPT);
26786 if (status & VIA_IRQ_VBLANK_PENDING) {
26787 - atomic_inc(&dev_priv->vbl_received);
26788 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26789 + atomic_inc_unchecked(&dev_priv->vbl_received);
26790 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26791 do_gettimeofday(&cur_vblank);
26792 if (dev_priv->last_vblank_valid) {
26793 dev_priv->usec_per_vblank =
26794 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26795 dev_priv->last_vblank = cur_vblank;
26796 dev_priv->last_vblank_valid = 1;
26797 }
26798 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26799 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26800 DRM_DEBUG("US per vblank is: %u\n",
26801 dev_priv->usec_per_vblank);
26802 }
26803 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26804
26805 for (i = 0; i < dev_priv->num_irqs; ++i) {
26806 if (status & cur_irq->pending_mask) {
26807 - atomic_inc(&cur_irq->irq_received);
26808 + atomic_inc_unchecked(&cur_irq->irq_received);
26809 DRM_WAKEUP(&cur_irq->irq_queue);
26810 handled = 1;
26811 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26812 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26813 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26814 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26815 masks[irq][4]));
26816 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
26817 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
26818 } else {
26819 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26820 (((cur_irq_sequence =
26821 - atomic_read(&cur_irq->irq_received)) -
26822 + atomic_read_unchecked(&cur_irq->irq_received)) -
26823 *sequence) <= (1 << 23)));
26824 }
26825 *sequence = cur_irq_sequence;
26826 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
26827 }
26828
26829 for (i = 0; i < dev_priv->num_irqs; ++i) {
26830 - atomic_set(&cur_irq->irq_received, 0);
26831 + atomic_set_unchecked(&cur_irq->irq_received, 0);
26832 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
26833 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
26834 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
26835 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
26836 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
26837 case VIA_IRQ_RELATIVE:
26838 irqwait->request.sequence +=
26839 - atomic_read(&cur_irq->irq_received);
26840 + atomic_read_unchecked(&cur_irq->irq_received);
26841 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
26842 case VIA_IRQ_ABSOLUTE:
26843 break;
26844 diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
26845 --- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
26846 +++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
26847 @@ -240,7 +240,7 @@ struct vmw_private {
26848 * Fencing and IRQs.
26849 */
26850
26851 - atomic_t fence_seq;
26852 + atomic_unchecked_t fence_seq;
26853 wait_queue_head_t fence_queue;
26854 wait_queue_head_t fifo_queue;
26855 atomic_t fence_queue_waiters;
26856 diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
26857 --- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
26858 +++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
26859 @@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
26860 struct drm_vmw_fence_rep fence_rep;
26861 struct drm_vmw_fence_rep __user *user_fence_rep;
26862 int ret;
26863 - void *user_cmd;
26864 + void __user *user_cmd;
26865 void *cmd;
26866 uint32_t sequence;
26867 struct vmw_sw_context *sw_context = &dev_priv->ctx;
26868 diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
26869 --- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
26870 +++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
26871 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
26872 while (!vmw_lag_lt(queue, us)) {
26873 spin_lock(&queue->lock);
26874 if (list_empty(&queue->head))
26875 - sequence = atomic_read(&dev_priv->fence_seq);
26876 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26877 else {
26878 fence = list_first_entry(&queue->head,
26879 struct vmw_fence, head);
26880 diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
26881 --- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
26882 +++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
26883 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
26884 (unsigned int) min,
26885 (unsigned int) fifo->capabilities);
26886
26887 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26888 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26889 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
26890 vmw_fence_queue_init(&fifo->fence_queue);
26891 return vmw_fifo_send_fence(dev_priv, &dummy);
26892 @@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
26893 if (reserveable)
26894 iowrite32(bytes, fifo_mem +
26895 SVGA_FIFO_RESERVED);
26896 - return fifo_mem + (next_cmd >> 2);
26897 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
26898 } else {
26899 need_bounce = true;
26900 }
26901 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26902
26903 fm = vmw_fifo_reserve(dev_priv, bytes);
26904 if (unlikely(fm == NULL)) {
26905 - *sequence = atomic_read(&dev_priv->fence_seq);
26906 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26907 ret = -ENOMEM;
26908 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
26909 false, 3*HZ);
26910 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26911 }
26912
26913 do {
26914 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
26915 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
26916 } while (*sequence == 0);
26917
26918 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
26919 diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
26920 --- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
26921 +++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
26922 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
26923 * emitted. Then the fence is stale and signaled.
26924 */
26925
26926 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
26927 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
26928 > VMW_FENCE_WRAP);
26929
26930 return ret;
26931 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
26932
26933 if (fifo_idle)
26934 down_read(&fifo_state->rwsem);
26935 - signal_seq = atomic_read(&dev_priv->fence_seq);
26936 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
26937 ret = 0;
26938
26939 for (;;) {
26940 diff -urNp linux-3.0.7/drivers/hid/hid-core.c linux-3.0.7/drivers/hid/hid-core.c
26941 --- linux-3.0.7/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
26942 +++ linux-3.0.7/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
26943 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
26944
26945 int hid_add_device(struct hid_device *hdev)
26946 {
26947 - static atomic_t id = ATOMIC_INIT(0);
26948 + static atomic_unchecked_t id = ATOMIC_INIT(0);
26949 int ret;
26950
26951 if (WARN_ON(hdev->status & HID_STAT_ADDED))
26952 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
26953 /* XXX hack, any other cleaner solution after the driver core
26954 * is converted to allow more than 20 bytes as the device name? */
26955 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
26956 - hdev->vendor, hdev->product, atomic_inc_return(&id));
26957 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
26958
26959 hid_debug_register(hdev, dev_name(&hdev->dev));
26960 ret = device_add(&hdev->dev);
26961 diff -urNp linux-3.0.7/drivers/hid/usbhid/hiddev.c linux-3.0.7/drivers/hid/usbhid/hiddev.c
26962 --- linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
26963 +++ linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
26964 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
26965 break;
26966
26967 case HIDIOCAPPLICATION:
26968 - if (arg < 0 || arg >= hid->maxapplication)
26969 + if (arg >= hid->maxapplication)
26970 break;
26971
26972 for (i = 0; i < hid->maxcollection; i++)
26973 diff -urNp linux-3.0.7/drivers/hwmon/acpi_power_meter.c linux-3.0.7/drivers/hwmon/acpi_power_meter.c
26974 --- linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
26975 +++ linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
26976 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
26977 return res;
26978
26979 temp /= 1000;
26980 - if (temp < 0)
26981 - return -EINVAL;
26982
26983 mutex_lock(&resource->lock);
26984 resource->trip[attr->index - 7] = temp;
26985 diff -urNp linux-3.0.7/drivers/hwmon/sht15.c linux-3.0.7/drivers/hwmon/sht15.c
26986 --- linux-3.0.7/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
26987 +++ linux-3.0.7/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
26988 @@ -166,7 +166,7 @@ struct sht15_data {
26989 int supply_uV;
26990 bool supply_uV_valid;
26991 struct work_struct update_supply_work;
26992 - atomic_t interrupt_handled;
26993 + atomic_unchecked_t interrupt_handled;
26994 };
26995
26996 /**
26997 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
26998 return ret;
26999
27000 gpio_direction_input(data->pdata->gpio_data);
27001 - atomic_set(&data->interrupt_handled, 0);
27002 + atomic_set_unchecked(&data->interrupt_handled, 0);
27003
27004 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27005 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27006 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27007 /* Only relevant if the interrupt hasn't occurred. */
27008 - if (!atomic_read(&data->interrupt_handled))
27009 + if (!atomic_read_unchecked(&data->interrupt_handled))
27010 schedule_work(&data->read_work);
27011 }
27012 ret = wait_event_timeout(data->wait_queue,
27013 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27014
27015 /* First disable the interrupt */
27016 disable_irq_nosync(irq);
27017 - atomic_inc(&data->interrupt_handled);
27018 + atomic_inc_unchecked(&data->interrupt_handled);
27019 /* Then schedule a reading work struct */
27020 if (data->state != SHT15_READING_NOTHING)
27021 schedule_work(&data->read_work);
27022 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27023 * If not, then start the interrupt again - care here as could
27024 * have gone low in meantime so verify it hasn't!
27025 */
27026 - atomic_set(&data->interrupt_handled, 0);
27027 + atomic_set_unchecked(&data->interrupt_handled, 0);
27028 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27029 /* If still not occurred or another handler has been scheduled */
27030 if (gpio_get_value(data->pdata->gpio_data)
27031 - || atomic_read(&data->interrupt_handled))
27032 + || atomic_read_unchecked(&data->interrupt_handled))
27033 return;
27034 }
27035
27036 diff -urNp linux-3.0.7/drivers/hwmon/w83791d.c linux-3.0.7/drivers/hwmon/w83791d.c
27037 --- linux-3.0.7/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
27038 +++ linux-3.0.7/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
27039 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
27040 struct i2c_board_info *info);
27041 static int w83791d_remove(struct i2c_client *client);
27042
27043 -static int w83791d_read(struct i2c_client *client, u8 register);
27044 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
27045 +static int w83791d_read(struct i2c_client *client, u8 reg);
27046 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
27047 static struct w83791d_data *w83791d_update_device(struct device *dev);
27048
27049 #ifdef DEBUG
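
The w83791d change is a plain correctness and readability fix: "register" is a reserved word in C (a storage-class specifier), so it cannot act as the parameter name these prototypes intended; renaming to "reg" makes the declarations say what was meant and keeps them consistent with the rest of the driver. A tiny compilable sketch of the corrected style (names are illustrative):

typedef unsigned char u8;

/* "register" cannot be used as an identifier, so the parameter is "reg". */
static int chip_read(void *client, u8 reg)
{
	(void)client;
	return reg;			/* placeholder body */
}

static int chip_write(void *client, u8 reg, u8 value)
{
	(void)client;
	return reg + value;		/* placeholder body */
}

int main(void)
{
	void *client = 0;

	return chip_write(client, 0x2a, (u8)chip_read(client, 1)) ? 0 : 1;
}
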
27050 diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c
27051 --- linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
27052 +++ linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
27053 @@ -43,7 +43,7 @@
27054 extern struct i2c_adapter amd756_smbus;
27055
27056 static struct i2c_adapter *s4882_adapter;
27057 -static struct i2c_algorithm *s4882_algo;
27058 +static i2c_algorithm_no_const *s4882_algo;
27059
27060 /* Wrapper access functions for multiplexed SMBus */
27061 static DEFINE_MUTEX(amd756_lock);
27062 diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c
27063 --- linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
27064 +++ linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
27065 @@ -41,7 +41,7 @@
27066 extern struct i2c_adapter *nforce2_smbus;
27067
27068 static struct i2c_adapter *s4985_adapter;
27069 -static struct i2c_algorithm *s4985_algo;
27070 +static i2c_algorithm_no_const *s4985_algo;
27071
27072 /* Wrapper access functions for multiplexed SMBus */
27073 static DEFINE_MUTEX(nforce2_lock);
27074 diff -urNp linux-3.0.7/drivers/i2c/i2c-mux.c linux-3.0.7/drivers/i2c/i2c-mux.c
27075 --- linux-3.0.7/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
27076 +++ linux-3.0.7/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
27077 @@ -28,7 +28,7 @@
27078 /* multiplexer per channel data */
27079 struct i2c_mux_priv {
27080 struct i2c_adapter adap;
27081 - struct i2c_algorithm algo;
27082 + i2c_algorithm_no_const algo;
27083
27084 struct i2c_adapter *parent;
27085 void *mux_dev; /* the mux chip/device */
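
The two SMBus multiplexer drivers and i2c-mux all show the same shape of change: elsewhere in this patch struct i2c_algorithm instances are constified, and an i2c_algorithm_no_const typedef (same layout, without the enforced const) covers the few objects that genuinely have to be created or adjusted at runtime, such as the per-channel copy inside struct i2c_mux_priv. A compilable sketch of the idea with made-up names:

/* Sketch only: ops tables default to const; a *_no_const alias covers the
 * rare instance that is filled in at probe time and then modified. */
struct bus_ops {
	int (*xfer)(void *ctx, int msg);
};
typedef struct bus_ops bus_ops_no_const;	/* writable alias */

static int real_xfer(void *ctx, int msg) { (void)ctx; return msg; }

static const struct bus_ops fixed_ops = {	/* normal, read-only table */
	.xfer = real_xfer,
};

static bus_ops_no_const runtime_ops;		/* built/adjusted at runtime */

static void wire_up(void)
{
	runtime_ops = fixed_ops;		/* start from the parent's ops */
	/* ...then override selected callbacks for the mux channel... */
}

int main(void)
{
	wire_up();
	return runtime_ops.xfer(0, 0);
}
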
27086 diff -urNp linux-3.0.7/drivers/ide/aec62xx.c linux-3.0.7/drivers/ide/aec62xx.c
27087 --- linux-3.0.7/drivers/ide/aec62xx.c 2011-07-21 22:17:23.000000000 -0400
27088 +++ linux-3.0.7/drivers/ide/aec62xx.c 2011-10-11 10:44:33.000000000 -0400
27089 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27090 .cable_detect = atp86x_cable_detect,
27091 };
27092
27093 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27094 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27095 { /* 0: AEC6210 */
27096 .name = DRV_NAME,
27097 .init_chipset = init_chipset_aec62xx,
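
From here through the rest of drivers/ide the patch uniformly swaps __devinitdata for __devinitconst on tables that are already declared const: the read-only data then lands in a read-only discardable-init section instead of the writable one, which respects the const qualifier and avoids gcc "section type conflict" trouble once constified data shares the section. A rough illustration of the annotation (the macro and section name below are assumptions modelled on the 3.0-era definition, not taken from this patch):

/* Simplified stand-in for the kernel macro (illustrative only). */
#define __devinitconst_example __attribute__((__section__(".devinit.rodata")))

struct port_info {
	const char *name;
};

/* const init data belongs in the read-only init section: */
static const struct port_info example_chipset __devinitconst_example = {
	.name = "example",
};

int main(void)
{
	return example_chipset.name[0] == 'e' ? 0 : 1;
}
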
27098 diff -urNp linux-3.0.7/drivers/ide/alim15x3.c linux-3.0.7/drivers/ide/alim15x3.c
27099 --- linux-3.0.7/drivers/ide/alim15x3.c 2011-07-21 22:17:23.000000000 -0400
27100 +++ linux-3.0.7/drivers/ide/alim15x3.c 2011-10-11 10:44:33.000000000 -0400
27101 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27102 .dma_sff_read_status = ide_dma_sff_read_status,
27103 };
27104
27105 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
27106 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
27107 .name = DRV_NAME,
27108 .init_chipset = init_chipset_ali15x3,
27109 .init_hwif = init_hwif_ali15x3,
27110 diff -urNp linux-3.0.7/drivers/ide/amd74xx.c linux-3.0.7/drivers/ide/amd74xx.c
27111 --- linux-3.0.7/drivers/ide/amd74xx.c 2011-07-21 22:17:23.000000000 -0400
27112 +++ linux-3.0.7/drivers/ide/amd74xx.c 2011-10-11 10:44:33.000000000 -0400
27113 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27114 .udma_mask = udma, \
27115 }
27116
27117 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27118 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27119 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27120 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27121 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27122 diff -urNp linux-3.0.7/drivers/ide/atiixp.c linux-3.0.7/drivers/ide/atiixp.c
27123 --- linux-3.0.7/drivers/ide/atiixp.c 2011-07-21 22:17:23.000000000 -0400
27124 +++ linux-3.0.7/drivers/ide/atiixp.c 2011-10-11 10:44:33.000000000 -0400
27125 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27126 .cable_detect = atiixp_cable_detect,
27127 };
27128
27129 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27130 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27131 { /* 0: IXP200/300/400/700 */
27132 .name = DRV_NAME,
27133 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27134 diff -urNp linux-3.0.7/drivers/ide/cmd64x.c linux-3.0.7/drivers/ide/cmd64x.c
27135 --- linux-3.0.7/drivers/ide/cmd64x.c 2011-07-21 22:17:23.000000000 -0400
27136 +++ linux-3.0.7/drivers/ide/cmd64x.c 2011-10-11 10:44:33.000000000 -0400
27137 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27138 .dma_sff_read_status = ide_dma_sff_read_status,
27139 };
27140
27141 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27142 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27143 { /* 0: CMD643 */
27144 .name = DRV_NAME,
27145 .init_chipset = init_chipset_cmd64x,
27146 diff -urNp linux-3.0.7/drivers/ide/cs5520.c linux-3.0.7/drivers/ide/cs5520.c
27147 --- linux-3.0.7/drivers/ide/cs5520.c 2011-07-21 22:17:23.000000000 -0400
27148 +++ linux-3.0.7/drivers/ide/cs5520.c 2011-10-11 10:44:33.000000000 -0400
27149 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27150 .set_dma_mode = cs5520_set_dma_mode,
27151 };
27152
27153 -static const struct ide_port_info cyrix_chipset __devinitdata = {
27154 +static const struct ide_port_info cyrix_chipset __devinitconst = {
27155 .name = DRV_NAME,
27156 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27157 .port_ops = &cs5520_port_ops,
27158 diff -urNp linux-3.0.7/drivers/ide/cs5530.c linux-3.0.7/drivers/ide/cs5530.c
27159 --- linux-3.0.7/drivers/ide/cs5530.c 2011-07-21 22:17:23.000000000 -0400
27160 +++ linux-3.0.7/drivers/ide/cs5530.c 2011-10-11 10:44:33.000000000 -0400
27161 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27162 .udma_filter = cs5530_udma_filter,
27163 };
27164
27165 -static const struct ide_port_info cs5530_chipset __devinitdata = {
27166 +static const struct ide_port_info cs5530_chipset __devinitconst = {
27167 .name = DRV_NAME,
27168 .init_chipset = init_chipset_cs5530,
27169 .init_hwif = init_hwif_cs5530,
27170 diff -urNp linux-3.0.7/drivers/ide/cs5535.c linux-3.0.7/drivers/ide/cs5535.c
27171 --- linux-3.0.7/drivers/ide/cs5535.c 2011-07-21 22:17:23.000000000 -0400
27172 +++ linux-3.0.7/drivers/ide/cs5535.c 2011-10-11 10:44:33.000000000 -0400
27173 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27174 .cable_detect = cs5535_cable_detect,
27175 };
27176
27177 -static const struct ide_port_info cs5535_chipset __devinitdata = {
27178 +static const struct ide_port_info cs5535_chipset __devinitconst = {
27179 .name = DRV_NAME,
27180 .port_ops = &cs5535_port_ops,
27181 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27182 diff -urNp linux-3.0.7/drivers/ide/cy82c693.c linux-3.0.7/drivers/ide/cy82c693.c
27183 --- linux-3.0.7/drivers/ide/cy82c693.c 2011-07-21 22:17:23.000000000 -0400
27184 +++ linux-3.0.7/drivers/ide/cy82c693.c 2011-10-11 10:44:33.000000000 -0400
27185 @@ -161,7 +161,7 @@ static const struct ide_port_ops cy82c69
27186 .set_dma_mode = cy82c693_set_dma_mode,
27187 };
27188
27189 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
27190 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
27191 .name = DRV_NAME,
27192 .init_iops = init_iops_cy82c693,
27193 .port_ops = &cy82c693_port_ops,
27194 diff -urNp linux-3.0.7/drivers/ide/hpt366.c linux-3.0.7/drivers/ide/hpt366.c
27195 --- linux-3.0.7/drivers/ide/hpt366.c 2011-07-21 22:17:23.000000000 -0400
27196 +++ linux-3.0.7/drivers/ide/hpt366.c 2011-10-11 10:44:33.000000000 -0400
27197 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27198 }
27199 };
27200
27201 -static const struct hpt_info hpt36x __devinitdata = {
27202 +static const struct hpt_info hpt36x __devinitconst = {
27203 .chip_name = "HPT36x",
27204 .chip_type = HPT36x,
27205 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27206 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27207 .timings = &hpt36x_timings
27208 };
27209
27210 -static const struct hpt_info hpt370 __devinitdata = {
27211 +static const struct hpt_info hpt370 __devinitconst = {
27212 .chip_name = "HPT370",
27213 .chip_type = HPT370,
27214 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27215 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27216 .timings = &hpt37x_timings
27217 };
27218
27219 -static const struct hpt_info hpt370a __devinitdata = {
27220 +static const struct hpt_info hpt370a __devinitconst = {
27221 .chip_name = "HPT370A",
27222 .chip_type = HPT370A,
27223 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27224 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27225 .timings = &hpt37x_timings
27226 };
27227
27228 -static const struct hpt_info hpt374 __devinitdata = {
27229 +static const struct hpt_info hpt374 __devinitconst = {
27230 .chip_name = "HPT374",
27231 .chip_type = HPT374,
27232 .udma_mask = ATA_UDMA5,
27233 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27234 .timings = &hpt37x_timings
27235 };
27236
27237 -static const struct hpt_info hpt372 __devinitdata = {
27238 +static const struct hpt_info hpt372 __devinitconst = {
27239 .chip_name = "HPT372",
27240 .chip_type = HPT372,
27241 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27242 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27243 .timings = &hpt37x_timings
27244 };
27245
27246 -static const struct hpt_info hpt372a __devinitdata = {
27247 +static const struct hpt_info hpt372a __devinitconst = {
27248 .chip_name = "HPT372A",
27249 .chip_type = HPT372A,
27250 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27251 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27252 .timings = &hpt37x_timings
27253 };
27254
27255 -static const struct hpt_info hpt302 __devinitdata = {
27256 +static const struct hpt_info hpt302 __devinitconst = {
27257 .chip_name = "HPT302",
27258 .chip_type = HPT302,
27259 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27260 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27261 .timings = &hpt37x_timings
27262 };
27263
27264 -static const struct hpt_info hpt371 __devinitdata = {
27265 +static const struct hpt_info hpt371 __devinitconst = {
27266 .chip_name = "HPT371",
27267 .chip_type = HPT371,
27268 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27269 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27270 .timings = &hpt37x_timings
27271 };
27272
27273 -static const struct hpt_info hpt372n __devinitdata = {
27274 +static const struct hpt_info hpt372n __devinitconst = {
27275 .chip_name = "HPT372N",
27276 .chip_type = HPT372N,
27277 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27278 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27279 .timings = &hpt37x_timings
27280 };
27281
27282 -static const struct hpt_info hpt302n __devinitdata = {
27283 +static const struct hpt_info hpt302n __devinitconst = {
27284 .chip_name = "HPT302N",
27285 .chip_type = HPT302N,
27286 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27287 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27288 .timings = &hpt37x_timings
27289 };
27290
27291 -static const struct hpt_info hpt371n __devinitdata = {
27292 +static const struct hpt_info hpt371n __devinitconst = {
27293 .chip_name = "HPT371N",
27294 .chip_type = HPT371N,
27295 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27296 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27297 .dma_sff_read_status = ide_dma_sff_read_status,
27298 };
27299
27300 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27301 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27302 { /* 0: HPT36x */
27303 .name = DRV_NAME,
27304 .init_chipset = init_chipset_hpt366,
27305 diff -urNp linux-3.0.7/drivers/ide/ide-cd.c linux-3.0.7/drivers/ide/ide-cd.c
27306 --- linux-3.0.7/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
27307 +++ linux-3.0.7/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
27308 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27309 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27310 if ((unsigned long)buf & alignment
27311 || blk_rq_bytes(rq) & q->dma_pad_mask
27312 - || object_is_on_stack(buf))
27313 + || object_starts_on_stack(buf))
27314 drive->dma = 0;
27315 }
27316 }
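
The ide-cd change swaps object_is_on_stack() for object_starts_on_stack(); in the hardened tree this appears to be a rename for clarity, making explicit that the helper only tests the buffer's starting address against the current task's stack, which is the property this DMA-suitability check relies on. A userspace analogue of that kind of test (the region is hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: answers whether an object *starts* inside a region,
 * not whether it fits entirely within it. */
static bool starts_in_region(const void *obj, const void *base, size_t size)
{
	uintptr_t p = (uintptr_t)obj;
	uintptr_t b = (uintptr_t)base;

	return p >= b && p - b < size;
}

int main(void)
{
	char region[64];

	/* the last byte of the region still "starts" inside it */
	return starts_in_region(&region[63], region, sizeof(region)) ? 0 : 1;
}
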
27317 diff -urNp linux-3.0.7/drivers/ide/ide-floppy.c linux-3.0.7/drivers/ide/ide-floppy.c
27318 --- linux-3.0.7/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
27319 +++ linux-3.0.7/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
27320 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27321 u8 pc_buf[256], header_len, desc_cnt;
27322 int i, rc = 1, blocks, length;
27323
27324 + pax_track_stack();
27325 +
27326 ide_debug_log(IDE_DBG_FUNC, "enter");
27327
27328 drive->bios_cyl = 0;
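
ide_floppy_get_capacity() keeps a 256-byte buffer plus headers on the stack, so the patch drops a pax_track_stack() call at the top; the same annotation appears below in ide_pci_init_two(), the SideWinder sw_read() path and the ipath counters read. The call supports PaX's stack sanitization: it records how deep the kernel stack got so the used region can be cleared before returning to user space. A conceptual userspace sketch of "record the deepest stack point reached" (assumes a downward-growing stack; this is not the PaX implementation):

#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

/* Remember the lowest stack address observed so far. */
static void track_stack(void)
{
	char marker;				/* lives at the current depth */
	uintptr_t sp = (uintptr_t)&marker;

	if (sp < lowest_sp)
		lowest_sp = sp;
}

static int get_capacity_like(void)
{
	unsigned char pc_buf[256];		/* large on-stack buffer */

	track_stack();				/* note how deep we went */
	pc_buf[0] = 0;
	return pc_buf[0];
}

int main(void)
{
	get_capacity_like();
	printf("deepest stack address seen: %#lx\n", (unsigned long)lowest_sp);
	return 0;
}
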
27329 diff -urNp linux-3.0.7/drivers/ide/ide-pci-generic.c linux-3.0.7/drivers/ide/ide-pci-generic.c
27330 --- linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-07-21 22:17:23.000000000 -0400
27331 +++ linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-10-11 10:44:33.000000000 -0400
27332 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27333 .udma_mask = ATA_UDMA6, \
27334 }
27335
27336 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
27337 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
27338 /* 0: Unknown */
27339 DECLARE_GENERIC_PCI_DEV(0),
27340
27341 diff -urNp linux-3.0.7/drivers/ide/it8172.c linux-3.0.7/drivers/ide/it8172.c
27342 --- linux-3.0.7/drivers/ide/it8172.c 2011-07-21 22:17:23.000000000 -0400
27343 +++ linux-3.0.7/drivers/ide/it8172.c 2011-10-11 10:44:33.000000000 -0400
27344 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27345 .set_dma_mode = it8172_set_dma_mode,
27346 };
27347
27348 -static const struct ide_port_info it8172_port_info __devinitdata = {
27349 +static const struct ide_port_info it8172_port_info __devinitconst = {
27350 .name = DRV_NAME,
27351 .port_ops = &it8172_port_ops,
27352 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27353 diff -urNp linux-3.0.7/drivers/ide/it8213.c linux-3.0.7/drivers/ide/it8213.c
27354 --- linux-3.0.7/drivers/ide/it8213.c 2011-07-21 22:17:23.000000000 -0400
27355 +++ linux-3.0.7/drivers/ide/it8213.c 2011-10-11 10:44:33.000000000 -0400
27356 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27357 .cable_detect = it8213_cable_detect,
27358 };
27359
27360 -static const struct ide_port_info it8213_chipset __devinitdata = {
27361 +static const struct ide_port_info it8213_chipset __devinitconst = {
27362 .name = DRV_NAME,
27363 .enablebits = { {0x41, 0x80, 0x80} },
27364 .port_ops = &it8213_port_ops,
27365 diff -urNp linux-3.0.7/drivers/ide/it821x.c linux-3.0.7/drivers/ide/it821x.c
27366 --- linux-3.0.7/drivers/ide/it821x.c 2011-07-21 22:17:23.000000000 -0400
27367 +++ linux-3.0.7/drivers/ide/it821x.c 2011-10-11 10:44:33.000000000 -0400
27368 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27369 .cable_detect = it821x_cable_detect,
27370 };
27371
27372 -static const struct ide_port_info it821x_chipset __devinitdata = {
27373 +static const struct ide_port_info it821x_chipset __devinitconst = {
27374 .name = DRV_NAME,
27375 .init_chipset = init_chipset_it821x,
27376 .init_hwif = init_hwif_it821x,
27377 diff -urNp linux-3.0.7/drivers/ide/jmicron.c linux-3.0.7/drivers/ide/jmicron.c
27378 --- linux-3.0.7/drivers/ide/jmicron.c 2011-07-21 22:17:23.000000000 -0400
27379 +++ linux-3.0.7/drivers/ide/jmicron.c 2011-10-11 10:44:33.000000000 -0400
27380 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27381 .cable_detect = jmicron_cable_detect,
27382 };
27383
27384 -static const struct ide_port_info jmicron_chipset __devinitdata = {
27385 +static const struct ide_port_info jmicron_chipset __devinitconst = {
27386 .name = DRV_NAME,
27387 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27388 .port_ops = &jmicron_port_ops,
27389 diff -urNp linux-3.0.7/drivers/ide/ns87415.c linux-3.0.7/drivers/ide/ns87415.c
27390 --- linux-3.0.7/drivers/ide/ns87415.c 2011-07-21 22:17:23.000000000 -0400
27391 +++ linux-3.0.7/drivers/ide/ns87415.c 2011-10-11 10:44:33.000000000 -0400
27392 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27393 .dma_sff_read_status = superio_dma_sff_read_status,
27394 };
27395
27396 -static const struct ide_port_info ns87415_chipset __devinitdata = {
27397 +static const struct ide_port_info ns87415_chipset __devinitconst = {
27398 .name = DRV_NAME,
27399 .init_hwif = init_hwif_ns87415,
27400 .tp_ops = &ns87415_tp_ops,
27401 diff -urNp linux-3.0.7/drivers/ide/opti621.c linux-3.0.7/drivers/ide/opti621.c
27402 --- linux-3.0.7/drivers/ide/opti621.c 2011-07-21 22:17:23.000000000 -0400
27403 +++ linux-3.0.7/drivers/ide/opti621.c 2011-10-11 10:44:33.000000000 -0400
27404 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27405 .set_pio_mode = opti621_set_pio_mode,
27406 };
27407
27408 -static const struct ide_port_info opti621_chipset __devinitdata = {
27409 +static const struct ide_port_info opti621_chipset __devinitconst = {
27410 .name = DRV_NAME,
27411 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27412 .port_ops = &opti621_port_ops,
27413 diff -urNp linux-3.0.7/drivers/ide/pdc202xx_new.c linux-3.0.7/drivers/ide/pdc202xx_new.c
27414 --- linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-07-21 22:17:23.000000000 -0400
27415 +++ linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-10-11 10:44:33.000000000 -0400
27416 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27417 .udma_mask = udma, \
27418 }
27419
27420 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27421 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27422 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27423 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27424 };
27425 diff -urNp linux-3.0.7/drivers/ide/pdc202xx_old.c linux-3.0.7/drivers/ide/pdc202xx_old.c
27426 --- linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-07-21 22:17:23.000000000 -0400
27427 +++ linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-10-11 10:44:33.000000000 -0400
27428 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27429 .max_sectors = sectors, \
27430 }
27431
27432 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27433 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27434 { /* 0: PDC20246 */
27435 .name = DRV_NAME,
27436 .init_chipset = init_chipset_pdc202xx,
27437 diff -urNp linux-3.0.7/drivers/ide/piix.c linux-3.0.7/drivers/ide/piix.c
27438 --- linux-3.0.7/drivers/ide/piix.c 2011-07-21 22:17:23.000000000 -0400
27439 +++ linux-3.0.7/drivers/ide/piix.c 2011-10-11 10:44:33.000000000 -0400
27440 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27441 .udma_mask = udma, \
27442 }
27443
27444 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
27445 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
27446 /* 0: MPIIX */
27447 { /*
27448 * MPIIX actually has only a single IDE channel mapped to
27449 diff -urNp linux-3.0.7/drivers/ide/rz1000.c linux-3.0.7/drivers/ide/rz1000.c
27450 --- linux-3.0.7/drivers/ide/rz1000.c 2011-07-21 22:17:23.000000000 -0400
27451 +++ linux-3.0.7/drivers/ide/rz1000.c 2011-10-11 10:44:33.000000000 -0400
27452 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27453 }
27454 }
27455
27456 -static const struct ide_port_info rz1000_chipset __devinitdata = {
27457 +static const struct ide_port_info rz1000_chipset __devinitconst = {
27458 .name = DRV_NAME,
27459 .host_flags = IDE_HFLAG_NO_DMA,
27460 };
27461 diff -urNp linux-3.0.7/drivers/ide/sc1200.c linux-3.0.7/drivers/ide/sc1200.c
27462 --- linux-3.0.7/drivers/ide/sc1200.c 2011-07-21 22:17:23.000000000 -0400
27463 +++ linux-3.0.7/drivers/ide/sc1200.c 2011-10-11 10:44:33.000000000 -0400
27464 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27465 .dma_sff_read_status = ide_dma_sff_read_status,
27466 };
27467
27468 -static const struct ide_port_info sc1200_chipset __devinitdata = {
27469 +static const struct ide_port_info sc1200_chipset __devinitconst = {
27470 .name = DRV_NAME,
27471 .port_ops = &sc1200_port_ops,
27472 .dma_ops = &sc1200_dma_ops,
27473 diff -urNp linux-3.0.7/drivers/ide/scc_pata.c linux-3.0.7/drivers/ide/scc_pata.c
27474 --- linux-3.0.7/drivers/ide/scc_pata.c 2011-07-21 22:17:23.000000000 -0400
27475 +++ linux-3.0.7/drivers/ide/scc_pata.c 2011-10-11 10:44:33.000000000 -0400
27476 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27477 .dma_sff_read_status = scc_dma_sff_read_status,
27478 };
27479
27480 -static const struct ide_port_info scc_chipset __devinitdata = {
27481 +static const struct ide_port_info scc_chipset __devinitconst = {
27482 .name = "sccIDE",
27483 .init_iops = init_iops_scc,
27484 .init_dma = scc_init_dma,
27485 diff -urNp linux-3.0.7/drivers/ide/serverworks.c linux-3.0.7/drivers/ide/serverworks.c
27486 --- linux-3.0.7/drivers/ide/serverworks.c 2011-07-21 22:17:23.000000000 -0400
27487 +++ linux-3.0.7/drivers/ide/serverworks.c 2011-10-11 10:44:33.000000000 -0400
27488 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27489 .cable_detect = svwks_cable_detect,
27490 };
27491
27492 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
27493 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
27494 { /* 0: OSB4 */
27495 .name = DRV_NAME,
27496 .init_chipset = init_chipset_svwks,
27497 diff -urNp linux-3.0.7/drivers/ide/setup-pci.c linux-3.0.7/drivers/ide/setup-pci.c
27498 --- linux-3.0.7/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
27499 +++ linux-3.0.7/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
27500 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
27501 int ret, i, n_ports = dev2 ? 4 : 2;
27502 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
27503
27504 + pax_track_stack();
27505 +
27506 for (i = 0; i < n_ports / 2; i++) {
27507 ret = ide_setup_pci_controller(pdev[i], d, !i);
27508 if (ret < 0)
27509 diff -urNp linux-3.0.7/drivers/ide/siimage.c linux-3.0.7/drivers/ide/siimage.c
27510 --- linux-3.0.7/drivers/ide/siimage.c 2011-07-21 22:17:23.000000000 -0400
27511 +++ linux-3.0.7/drivers/ide/siimage.c 2011-10-11 10:44:33.000000000 -0400
27512 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
27513 .udma_mask = ATA_UDMA6, \
27514 }
27515
27516 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
27517 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
27518 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
27519 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
27520 };
27521 diff -urNp linux-3.0.7/drivers/ide/sis5513.c linux-3.0.7/drivers/ide/sis5513.c
27522 --- linux-3.0.7/drivers/ide/sis5513.c 2011-07-21 22:17:23.000000000 -0400
27523 +++ linux-3.0.7/drivers/ide/sis5513.c 2011-10-11 10:44:33.000000000 -0400
27524 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
27525 .cable_detect = sis_cable_detect,
27526 };
27527
27528 -static const struct ide_port_info sis5513_chipset __devinitdata = {
27529 +static const struct ide_port_info sis5513_chipset __devinitconst = {
27530 .name = DRV_NAME,
27531 .init_chipset = init_chipset_sis5513,
27532 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
27533 diff -urNp linux-3.0.7/drivers/ide/sl82c105.c linux-3.0.7/drivers/ide/sl82c105.c
27534 --- linux-3.0.7/drivers/ide/sl82c105.c 2011-07-21 22:17:23.000000000 -0400
27535 +++ linux-3.0.7/drivers/ide/sl82c105.c 2011-10-11 10:44:33.000000000 -0400
27536 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
27537 .dma_sff_read_status = ide_dma_sff_read_status,
27538 };
27539
27540 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
27541 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
27542 .name = DRV_NAME,
27543 .init_chipset = init_chipset_sl82c105,
27544 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
27545 diff -urNp linux-3.0.7/drivers/ide/slc90e66.c linux-3.0.7/drivers/ide/slc90e66.c
27546 --- linux-3.0.7/drivers/ide/slc90e66.c 2011-07-21 22:17:23.000000000 -0400
27547 +++ linux-3.0.7/drivers/ide/slc90e66.c 2011-10-11 10:44:33.000000000 -0400
27548 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
27549 .cable_detect = slc90e66_cable_detect,
27550 };
27551
27552 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
27553 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
27554 .name = DRV_NAME,
27555 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
27556 .port_ops = &slc90e66_port_ops,
27557 diff -urNp linux-3.0.7/drivers/ide/tc86c001.c linux-3.0.7/drivers/ide/tc86c001.c
27558 --- linux-3.0.7/drivers/ide/tc86c001.c 2011-07-21 22:17:23.000000000 -0400
27559 +++ linux-3.0.7/drivers/ide/tc86c001.c 2011-10-11 10:44:33.000000000 -0400
27560 @@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
27561 .dma_sff_read_status = ide_dma_sff_read_status,
27562 };
27563
27564 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
27565 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
27566 .name = DRV_NAME,
27567 .init_hwif = init_hwif_tc86c001,
27568 .port_ops = &tc86c001_port_ops,
27569 diff -urNp linux-3.0.7/drivers/ide/triflex.c linux-3.0.7/drivers/ide/triflex.c
27570 --- linux-3.0.7/drivers/ide/triflex.c 2011-07-21 22:17:23.000000000 -0400
27571 +++ linux-3.0.7/drivers/ide/triflex.c 2011-10-11 10:44:33.000000000 -0400
27572 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
27573 .set_dma_mode = triflex_set_mode,
27574 };
27575
27576 -static const struct ide_port_info triflex_device __devinitdata = {
27577 +static const struct ide_port_info triflex_device __devinitconst = {
27578 .name = DRV_NAME,
27579 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
27580 .port_ops = &triflex_port_ops,
27581 diff -urNp linux-3.0.7/drivers/ide/trm290.c linux-3.0.7/drivers/ide/trm290.c
27582 --- linux-3.0.7/drivers/ide/trm290.c 2011-07-21 22:17:23.000000000 -0400
27583 +++ linux-3.0.7/drivers/ide/trm290.c 2011-10-11 10:44:33.000000000 -0400
27584 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
27585 .dma_check = trm290_dma_check,
27586 };
27587
27588 -static const struct ide_port_info trm290_chipset __devinitdata = {
27589 +static const struct ide_port_info trm290_chipset __devinitconst = {
27590 .name = DRV_NAME,
27591 .init_hwif = init_hwif_trm290,
27592 .tp_ops = &trm290_tp_ops,
27593 diff -urNp linux-3.0.7/drivers/ide/via82cxxx.c linux-3.0.7/drivers/ide/via82cxxx.c
27594 --- linux-3.0.7/drivers/ide/via82cxxx.c 2011-07-21 22:17:23.000000000 -0400
27595 +++ linux-3.0.7/drivers/ide/via82cxxx.c 2011-10-11 10:44:33.000000000 -0400
27596 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
27597 .cable_detect = via82cxxx_cable_detect,
27598 };
27599
27600 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
27601 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
27602 .name = DRV_NAME,
27603 .init_chipset = init_chipset_via82cxxx,
27604 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
27605 diff -urNp linux-3.0.7/drivers/infiniband/core/cm.c linux-3.0.7/drivers/infiniband/core/cm.c
27606 --- linux-3.0.7/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
27607 +++ linux-3.0.7/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
27608 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
27609
27610 struct cm_counter_group {
27611 struct kobject obj;
27612 - atomic_long_t counter[CM_ATTR_COUNT];
27613 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
27614 };
27615
27616 struct cm_counter_attribute {
27617 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
27618 struct ib_mad_send_buf *msg = NULL;
27619 int ret;
27620
27621 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27622 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27623 counter[CM_REQ_COUNTER]);
27624
27625 /* Quick state check to discard duplicate REQs. */
27626 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
27627 if (!cm_id_priv)
27628 return;
27629
27630 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27631 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27632 counter[CM_REP_COUNTER]);
27633 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
27634 if (ret)
27635 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
27636 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
27637 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
27638 spin_unlock_irq(&cm_id_priv->lock);
27639 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27640 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27641 counter[CM_RTU_COUNTER]);
27642 goto out;
27643 }
27644 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
27645 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
27646 dreq_msg->local_comm_id);
27647 if (!cm_id_priv) {
27648 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27649 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27650 counter[CM_DREQ_COUNTER]);
27651 cm_issue_drep(work->port, work->mad_recv_wc);
27652 return -EINVAL;
27653 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
27654 case IB_CM_MRA_REP_RCVD:
27655 break;
27656 case IB_CM_TIMEWAIT:
27657 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27658 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27659 counter[CM_DREQ_COUNTER]);
27660 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27661 goto unlock;
27662 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
27663 cm_free_msg(msg);
27664 goto deref;
27665 case IB_CM_DREQ_RCVD:
27666 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27667 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27668 counter[CM_DREQ_COUNTER]);
27669 goto unlock;
27670 default:
27671 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
27672 ib_modify_mad(cm_id_priv->av.port->mad_agent,
27673 cm_id_priv->msg, timeout)) {
27674 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
27675 - atomic_long_inc(&work->port->
27676 + atomic_long_inc_unchecked(&work->port->
27677 counter_group[CM_RECV_DUPLICATES].
27678 counter[CM_MRA_COUNTER]);
27679 goto out;
27680 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
27681 break;
27682 case IB_CM_MRA_REQ_RCVD:
27683 case IB_CM_MRA_REP_RCVD:
27684 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27685 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27686 counter[CM_MRA_COUNTER]);
27687 /* fall through */
27688 default:
27689 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
27690 case IB_CM_LAP_IDLE:
27691 break;
27692 case IB_CM_MRA_LAP_SENT:
27693 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27694 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27695 counter[CM_LAP_COUNTER]);
27696 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27697 goto unlock;
27698 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
27699 cm_free_msg(msg);
27700 goto deref;
27701 case IB_CM_LAP_RCVD:
27702 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27703 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27704 counter[CM_LAP_COUNTER]);
27705 goto unlock;
27706 default:
27707 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
27708 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
27709 if (cur_cm_id_priv) {
27710 spin_unlock_irq(&cm.lock);
27711 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27712 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27713 counter[CM_SIDR_REQ_COUNTER]);
27714 goto out; /* Duplicate message. */
27715 }
27716 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
27717 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
27718 msg->retries = 1;
27719
27720 - atomic_long_add(1 + msg->retries,
27721 + atomic_long_add_unchecked(1 + msg->retries,
27722 &port->counter_group[CM_XMIT].counter[attr_index]);
27723 if (msg->retries)
27724 - atomic_long_add(msg->retries,
27725 + atomic_long_add_unchecked(msg->retries,
27726 &port->counter_group[CM_XMIT_RETRIES].
27727 counter[attr_index]);
27728
27729 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
27730 }
27731
27732 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
27733 - atomic_long_inc(&port->counter_group[CM_RECV].
27734 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
27735 counter[attr_id - CM_ATTR_ID_OFFSET]);
27736
27737 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
27738 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
27739 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
27740
27741 return sprintf(buf, "%ld\n",
27742 - atomic_long_read(&group->counter[cm_attr->index]));
27743 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
27744 }
27745
27746 static const struct sysfs_ops cm_counter_ops = {
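
All of the ib_cm counter traffic above is statistics only: per-port duplicate and transmit tallies that are summed and printed through sysfs, never used to decide object lifetime. Converting them to atomic_long_unchecked_t, with the matching _unchecked accessors taking the same arguments, keeps the PaX overflow checks focused on real reference counts; the fmr_pool serial numbers and the nes and cxgb4 counters further down follow the same rule. The substitution is mechanical, roughly the accessor shape sketched here with hypothetical names (GCC/Clang __atomic builtins stand in for the arch atomic headers):

typedef struct { long counter; } atomic_long_unchecked_example_t;

static inline void ex_atomic_long_inc(atomic_long_unchecked_example_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);	/* may wrap */
}

static inline long ex_atomic_long_read(atomic_long_unchecked_example_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_long_unchecked_example_t c = { 0 };

	ex_atomic_long_inc(&c);
	return ex_atomic_long_read(&c) == 1 ? 0 : 1;
}
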
27747 diff -urNp linux-3.0.7/drivers/infiniband/core/fmr_pool.c linux-3.0.7/drivers/infiniband/core/fmr_pool.c
27748 --- linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
27749 +++ linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
27750 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
27751
27752 struct task_struct *thread;
27753
27754 - atomic_t req_ser;
27755 - atomic_t flush_ser;
27756 + atomic_unchecked_t req_ser;
27757 + atomic_unchecked_t flush_ser;
27758
27759 wait_queue_head_t force_wait;
27760 };
27761 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
27762 struct ib_fmr_pool *pool = pool_ptr;
27763
27764 do {
27765 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
27766 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
27767 ib_fmr_batch_release(pool);
27768
27769 - atomic_inc(&pool->flush_ser);
27770 + atomic_inc_unchecked(&pool->flush_ser);
27771 wake_up_interruptible(&pool->force_wait);
27772
27773 if (pool->flush_function)
27774 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
27775 }
27776
27777 set_current_state(TASK_INTERRUPTIBLE);
27778 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
27779 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
27780 !kthread_should_stop())
27781 schedule();
27782 __set_current_state(TASK_RUNNING);
27783 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
27784 pool->dirty_watermark = params->dirty_watermark;
27785 pool->dirty_len = 0;
27786 spin_lock_init(&pool->pool_lock);
27787 - atomic_set(&pool->req_ser, 0);
27788 - atomic_set(&pool->flush_ser, 0);
27789 + atomic_set_unchecked(&pool->req_ser, 0);
27790 + atomic_set_unchecked(&pool->flush_ser, 0);
27791 init_waitqueue_head(&pool->force_wait);
27792
27793 pool->thread = kthread_run(ib_fmr_cleanup_thread,
27794 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
27795 }
27796 spin_unlock_irq(&pool->pool_lock);
27797
27798 - serial = atomic_inc_return(&pool->req_ser);
27799 + serial = atomic_inc_return_unchecked(&pool->req_ser);
27800 wake_up_process(pool->thread);
27801
27802 if (wait_event_interruptible(pool->force_wait,
27803 - atomic_read(&pool->flush_ser) - serial >= 0))
27804 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
27805 return -EINTR;
27806
27807 return 0;
27808 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
27809 } else {
27810 list_add_tail(&fmr->list, &pool->dirty_list);
27811 if (++pool->dirty_len >= pool->dirty_watermark) {
27812 - atomic_inc(&pool->req_ser);
27813 + atomic_inc_unchecked(&pool->req_ser);
27814 wake_up_process(pool->thread);
27815 }
27816 }
27817 diff -urNp linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c
27818 --- linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
27819 +++ linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
27820 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
27821 int err;
27822 struct fw_ri_tpte tpt;
27823 u32 stag_idx;
27824 - static atomic_t key;
27825 + static atomic_unchecked_t key;
27826
27827 if (c4iw_fatal_error(rdev))
27828 return -EIO;
27829 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
27830 &rdev->resource.tpt_fifo_lock);
27831 if (!stag_idx)
27832 return -ENOMEM;
27833 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
27834 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
27835 }
27836 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
27837 __func__, stag_state, type, pdid, stag_idx);
27838 diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c
27839 --- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
27840 +++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
27841 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
27842 struct infinipath_counters counters;
27843 struct ipath_devdata *dd;
27844
27845 + pax_track_stack();
27846 +
27847 dd = file->f_path.dentry->d_inode->i_private;
27848 dd->ipath_f_read_counters(dd, &counters);
27849
27850 diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c
27851 --- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
27852 +++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
27853 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27854 struct ib_atomic_eth *ateth;
27855 struct ipath_ack_entry *e;
27856 u64 vaddr;
27857 - atomic64_t *maddr;
27858 + atomic64_unchecked_t *maddr;
27859 u64 sdata;
27860 u32 rkey;
27861 u8 next;
27862 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27863 IB_ACCESS_REMOTE_ATOMIC)))
27864 goto nack_acc_unlck;
27865 /* Perform atomic OP and save result. */
27866 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27867 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27868 sdata = be64_to_cpu(ateth->swap_data);
27869 e = &qp->s_ack_queue[qp->r_head_ack_queue];
27870 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
27871 - (u64) atomic64_add_return(sdata, maddr) - sdata :
27872 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27873 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27874 be64_to_cpu(ateth->compare_data),
27875 sdata);
27876 diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c
27877 --- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
27878 +++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
27879 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
27880 unsigned long flags;
27881 struct ib_wc wc;
27882 u64 sdata;
27883 - atomic64_t *maddr;
27884 + atomic64_unchecked_t *maddr;
27885 enum ib_wc_status send_status;
27886
27887 /*
27888 @@ -382,11 +382,11 @@ again:
27889 IB_ACCESS_REMOTE_ATOMIC)))
27890 goto acc_err;
27891 /* Perform atomic OP and save result. */
27892 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27893 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27894 sdata = wqe->wr.wr.atomic.compare_add;
27895 *(u64 *) sqp->s_sge.sge.vaddr =
27896 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
27897 - (u64) atomic64_add_return(sdata, maddr) - sdata :
27898 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27899 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27900 sdata, wqe->wr.wr.atomic.swap);
27901 goto send_comp;
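
In the ipath RC and RUC paths the 64-bit target of an IB FETCH_ADD is registered application memory holding arbitrary data, so overflow is a normal outcome of the operation rather than a bug; the cast therefore moves to atomic64_unchecked_t and atomic64_add_return_unchecked so the hardened overflow detection is not tripped by legitimate wrap-around. The protocol-level behaviour being preserved is simply "return the old value, store old plus addend", sketched here as a plain, non-atomic userspace model:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of IB FETCH_ADD semantics: hand back the prior
 * contents and store the wrapped 64-bit sum. */
static uint64_t fetch_add64(uint64_t *target, uint64_t addend)
{
	uint64_t old = *target;

	*target = old + addend;		/* unsigned arithmetic wraps by design */
	return old;
}

int main(void)
{
	uint64_t slot = UINT64_MAX;
	uint64_t old = fetch_add64(&slot, 2);

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old, (unsigned long long)slot);
	return 0;
}
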
27902 diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.c linux-3.0.7/drivers/infiniband/hw/nes/nes.c
27903 --- linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
27904 +++ linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
27905 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
27906 LIST_HEAD(nes_adapter_list);
27907 static LIST_HEAD(nes_dev_list);
27908
27909 -atomic_t qps_destroyed;
27910 +atomic_unchecked_t qps_destroyed;
27911
27912 static unsigned int ee_flsh_adapter;
27913 static unsigned int sysfs_nonidx_addr;
27914 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
27915 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
27916 struct nes_adapter *nesadapter = nesdev->nesadapter;
27917
27918 - atomic_inc(&qps_destroyed);
27919 + atomic_inc_unchecked(&qps_destroyed);
27920
27921 /* Free the control structures */
27922
27923 diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c
27924 --- linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
27925 +++ linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
27926 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
27927 u32 cm_packets_retrans;
27928 u32 cm_packets_created;
27929 u32 cm_packets_received;
27930 -atomic_t cm_listens_created;
27931 -atomic_t cm_listens_destroyed;
27932 +atomic_unchecked_t cm_listens_created;
27933 +atomic_unchecked_t cm_listens_destroyed;
27934 u32 cm_backlog_drops;
27935 -atomic_t cm_loopbacks;
27936 -atomic_t cm_nodes_created;
27937 -atomic_t cm_nodes_destroyed;
27938 -atomic_t cm_accel_dropped_pkts;
27939 -atomic_t cm_resets_recvd;
27940 +atomic_unchecked_t cm_loopbacks;
27941 +atomic_unchecked_t cm_nodes_created;
27942 +atomic_unchecked_t cm_nodes_destroyed;
27943 +atomic_unchecked_t cm_accel_dropped_pkts;
27944 +atomic_unchecked_t cm_resets_recvd;
27945
27946 static inline int mini_cm_accelerated(struct nes_cm_core *,
27947 struct nes_cm_node *);
27948 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
27949
27950 static struct nes_cm_core *g_cm_core;
27951
27952 -atomic_t cm_connects;
27953 -atomic_t cm_accepts;
27954 -atomic_t cm_disconnects;
27955 -atomic_t cm_closes;
27956 -atomic_t cm_connecteds;
27957 -atomic_t cm_connect_reqs;
27958 -atomic_t cm_rejects;
27959 +atomic_unchecked_t cm_connects;
27960 +atomic_unchecked_t cm_accepts;
27961 +atomic_unchecked_t cm_disconnects;
27962 +atomic_unchecked_t cm_closes;
27963 +atomic_unchecked_t cm_connecteds;
27964 +atomic_unchecked_t cm_connect_reqs;
27965 +atomic_unchecked_t cm_rejects;
27966
27967
27968 /**
27969 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
27970 kfree(listener);
27971 listener = NULL;
27972 ret = 0;
27973 - atomic_inc(&cm_listens_destroyed);
27974 + atomic_inc_unchecked(&cm_listens_destroyed);
27975 } else {
27976 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
27977 }
27978 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
27979 cm_node->rem_mac);
27980
27981 add_hte_node(cm_core, cm_node);
27982 - atomic_inc(&cm_nodes_created);
27983 + atomic_inc_unchecked(&cm_nodes_created);
27984
27985 return cm_node;
27986 }
27987 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
27988 }
27989
27990 atomic_dec(&cm_core->node_cnt);
27991 - atomic_inc(&cm_nodes_destroyed);
27992 + atomic_inc_unchecked(&cm_nodes_destroyed);
27993 nesqp = cm_node->nesqp;
27994 if (nesqp) {
27995 nesqp->cm_node = NULL;
27996 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
27997
27998 static void drop_packet(struct sk_buff *skb)
27999 {
28000 - atomic_inc(&cm_accel_dropped_pkts);
28001 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
28002 dev_kfree_skb_any(skb);
28003 }
28004
28005 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28006 {
28007
28008 int reset = 0; /* whether to send reset in case of err.. */
28009 - atomic_inc(&cm_resets_recvd);
28010 + atomic_inc_unchecked(&cm_resets_recvd);
28011 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28012 " refcnt=%d\n", cm_node, cm_node->state,
28013 atomic_read(&cm_node->ref_count));
28014 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28015 rem_ref_cm_node(cm_node->cm_core, cm_node);
28016 return NULL;
28017 }
28018 - atomic_inc(&cm_loopbacks);
28019 + atomic_inc_unchecked(&cm_loopbacks);
28020 loopbackremotenode->loopbackpartner = cm_node;
28021 loopbackremotenode->tcp_cntxt.rcv_wscale =
28022 NES_CM_DEFAULT_RCV_WND_SCALE;
28023 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28024 add_ref_cm_node(cm_node);
28025 } else if (cm_node->state == NES_CM_STATE_TSA) {
28026 rem_ref_cm_node(cm_core, cm_node);
28027 - atomic_inc(&cm_accel_dropped_pkts);
28028 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
28029 dev_kfree_skb_any(skb);
28030 break;
28031 }
28032 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28033
28034 if ((cm_id) && (cm_id->event_handler)) {
28035 if (issue_disconn) {
28036 - atomic_inc(&cm_disconnects);
28037 + atomic_inc_unchecked(&cm_disconnects);
28038 cm_event.event = IW_CM_EVENT_DISCONNECT;
28039 cm_event.status = disconn_status;
28040 cm_event.local_addr = cm_id->local_addr;
28041 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28042 }
28043
28044 if (issue_close) {
28045 - atomic_inc(&cm_closes);
28046 + atomic_inc_unchecked(&cm_closes);
28047 nes_disconnect(nesqp, 1);
28048
28049 cm_id->provider_data = nesqp;
28050 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28051
28052 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28053 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28054 - atomic_inc(&cm_accepts);
28055 + atomic_inc_unchecked(&cm_accepts);
28056
28057 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28058 netdev_refcnt_read(nesvnic->netdev));
28059 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28060
28061 struct nes_cm_core *cm_core;
28062
28063 - atomic_inc(&cm_rejects);
28064 + atomic_inc_unchecked(&cm_rejects);
28065 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28066 loopback = cm_node->loopbackpartner;
28067 cm_core = cm_node->cm_core;
28068 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28069 ntohl(cm_id->local_addr.sin_addr.s_addr),
28070 ntohs(cm_id->local_addr.sin_port));
28071
28072 - atomic_inc(&cm_connects);
28073 + atomic_inc_unchecked(&cm_connects);
28074 nesqp->active_conn = 1;
28075
28076 /* cache the cm_id in the qp */
28077 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28078 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28079 return err;
28080 }
28081 - atomic_inc(&cm_listens_created);
28082 + atomic_inc_unchecked(&cm_listens_created);
28083 }
28084
28085 cm_id->add_ref(cm_id);
28086 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28087 if (nesqp->destroyed) {
28088 return;
28089 }
28090 - atomic_inc(&cm_connecteds);
28091 + atomic_inc_unchecked(&cm_connecteds);
28092 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28093 " local port 0x%04X. jiffies = %lu.\n",
28094 nesqp->hwqp.qp_id,
28095 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28096
28097 cm_id->add_ref(cm_id);
28098 ret = cm_id->event_handler(cm_id, &cm_event);
28099 - atomic_inc(&cm_closes);
28100 + atomic_inc_unchecked(&cm_closes);
28101 cm_event.event = IW_CM_EVENT_CLOSE;
28102 cm_event.status = 0;
28103 cm_event.provider_data = cm_id->provider_data;
28104 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28105 return;
28106 cm_id = cm_node->cm_id;
28107
28108 - atomic_inc(&cm_connect_reqs);
28109 + atomic_inc_unchecked(&cm_connect_reqs);
28110 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28111 cm_node, cm_id, jiffies);
28112
28113 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28114 return;
28115 cm_id = cm_node->cm_id;
28116
28117 - atomic_inc(&cm_connect_reqs);
28118 + atomic_inc_unchecked(&cm_connect_reqs);
28119 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28120 cm_node, cm_id, jiffies);
28121
28122 diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.h linux-3.0.7/drivers/infiniband/hw/nes/nes.h
28123 --- linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
28124 +++ linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
28125 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28126 extern unsigned int wqm_quanta;
28127 extern struct list_head nes_adapter_list;
28128
28129 -extern atomic_t cm_connects;
28130 -extern atomic_t cm_accepts;
28131 -extern atomic_t cm_disconnects;
28132 -extern atomic_t cm_closes;
28133 -extern atomic_t cm_connecteds;
28134 -extern atomic_t cm_connect_reqs;
28135 -extern atomic_t cm_rejects;
28136 -extern atomic_t mod_qp_timouts;
28137 -extern atomic_t qps_created;
28138 -extern atomic_t qps_destroyed;
28139 -extern atomic_t sw_qps_destroyed;
28140 +extern atomic_unchecked_t cm_connects;
28141 +extern atomic_unchecked_t cm_accepts;
28142 +extern atomic_unchecked_t cm_disconnects;
28143 +extern atomic_unchecked_t cm_closes;
28144 +extern atomic_unchecked_t cm_connecteds;
28145 +extern atomic_unchecked_t cm_connect_reqs;
28146 +extern atomic_unchecked_t cm_rejects;
28147 +extern atomic_unchecked_t mod_qp_timouts;
28148 +extern atomic_unchecked_t qps_created;
28149 +extern atomic_unchecked_t qps_destroyed;
28150 +extern atomic_unchecked_t sw_qps_destroyed;
28151 extern u32 mh_detected;
28152 extern u32 mh_pauses_sent;
28153 extern u32 cm_packets_sent;
28154 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28155 extern u32 cm_packets_received;
28156 extern u32 cm_packets_dropped;
28157 extern u32 cm_packets_retrans;
28158 -extern atomic_t cm_listens_created;
28159 -extern atomic_t cm_listens_destroyed;
28160 +extern atomic_unchecked_t cm_listens_created;
28161 +extern atomic_unchecked_t cm_listens_destroyed;
28162 extern u32 cm_backlog_drops;
28163 -extern atomic_t cm_loopbacks;
28164 -extern atomic_t cm_nodes_created;
28165 -extern atomic_t cm_nodes_destroyed;
28166 -extern atomic_t cm_accel_dropped_pkts;
28167 -extern atomic_t cm_resets_recvd;
28168 +extern atomic_unchecked_t cm_loopbacks;
28169 +extern atomic_unchecked_t cm_nodes_created;
28170 +extern atomic_unchecked_t cm_nodes_destroyed;
28171 +extern atomic_unchecked_t cm_accel_dropped_pkts;
28172 +extern atomic_unchecked_t cm_resets_recvd;
28173
28174 extern u32 int_mod_timer_init;
28175 extern u32 int_mod_cq_depth_256;
28176 diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c
28177 --- linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
28178 +++ linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
28179 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28180 target_stat_values[++index] = mh_detected;
28181 target_stat_values[++index] = mh_pauses_sent;
28182 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28183 - target_stat_values[++index] = atomic_read(&cm_connects);
28184 - target_stat_values[++index] = atomic_read(&cm_accepts);
28185 - target_stat_values[++index] = atomic_read(&cm_disconnects);
28186 - target_stat_values[++index] = atomic_read(&cm_connecteds);
28187 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28188 - target_stat_values[++index] = atomic_read(&cm_rejects);
28189 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28190 - target_stat_values[++index] = atomic_read(&qps_created);
28191 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28192 - target_stat_values[++index] = atomic_read(&qps_destroyed);
28193 - target_stat_values[++index] = atomic_read(&cm_closes);
28194 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28195 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28196 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28197 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28198 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28199 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28200 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28201 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28202 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28203 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28204 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28205 target_stat_values[++index] = cm_packets_sent;
28206 target_stat_values[++index] = cm_packets_bounced;
28207 target_stat_values[++index] = cm_packets_created;
28208 target_stat_values[++index] = cm_packets_received;
28209 target_stat_values[++index] = cm_packets_dropped;
28210 target_stat_values[++index] = cm_packets_retrans;
28211 - target_stat_values[++index] = atomic_read(&cm_listens_created);
28212 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28213 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28214 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28215 target_stat_values[++index] = cm_backlog_drops;
28216 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
28217 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
28218 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28219 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28220 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28221 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28222 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28223 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28224 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28225 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28226 target_stat_values[++index] = nesadapter->free_4kpbl;
28227 target_stat_values[++index] = nesadapter->free_256pbl;
28228 target_stat_values[++index] = int_mod_timer_init;
28229 diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c
28230 --- linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
28231 +++ linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
28232 @@ -46,9 +46,9 @@
28233
28234 #include <rdma/ib_umem.h>
28235
28236 -atomic_t mod_qp_timouts;
28237 -atomic_t qps_created;
28238 -atomic_t sw_qps_destroyed;
28239 +atomic_unchecked_t mod_qp_timouts;
28240 +atomic_unchecked_t qps_created;
28241 +atomic_unchecked_t sw_qps_destroyed;
28242
28243 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28244
28245 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
28246 if (init_attr->create_flags)
28247 return ERR_PTR(-EINVAL);
28248
28249 - atomic_inc(&qps_created);
28250 + atomic_inc_unchecked(&qps_created);
28251 switch (init_attr->qp_type) {
28252 case IB_QPT_RC:
28253 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28254 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
28255 struct iw_cm_event cm_event;
28256 int ret;
28257
28258 - atomic_inc(&sw_qps_destroyed);
28259 + atomic_inc_unchecked(&sw_qps_destroyed);
28260 nesqp->destroyed = 1;
28261
28262 /* Blow away the connection if it exists. */
28263 diff -urNp linux-3.0.7/drivers/infiniband/hw/qib/qib.h linux-3.0.7/drivers/infiniband/hw/qib/qib.h
28264 --- linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
28265 +++ linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
28266 @@ -51,6 +51,7 @@
28267 #include <linux/completion.h>
28268 #include <linux/kref.h>
28269 #include <linux/sched.h>
28270 +#include <linux/slab.h>
28271
28272 #include "qib_common.h"
28273 #include "qib_verbs.h"
28274 diff -urNp linux-3.0.7/drivers/input/gameport/gameport.c linux-3.0.7/drivers/input/gameport/gameport.c
28275 --- linux-3.0.7/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
28276 +++ linux-3.0.7/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
28277 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28278 */
28279 static void gameport_init_port(struct gameport *gameport)
28280 {
28281 - static atomic_t gameport_no = ATOMIC_INIT(0);
28282 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28283
28284 __module_get(THIS_MODULE);
28285
28286 mutex_init(&gameport->drv_mutex);
28287 device_initialize(&gameport->dev);
28288 dev_set_name(&gameport->dev, "gameport%lu",
28289 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
28290 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28291 gameport->dev.bus = &gameport_bus;
28292 gameport->dev.release = gameport_release_port;
28293 if (gameport->parent)
28294 diff -urNp linux-3.0.7/drivers/input/input.c linux-3.0.7/drivers/input/input.c
28295 --- linux-3.0.7/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
28296 +++ linux-3.0.7/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
28297 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28298 */
28299 int input_register_device(struct input_dev *dev)
28300 {
28301 - static atomic_t input_no = ATOMIC_INIT(0);
28302 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28303 struct input_handler *handler;
28304 const char *path;
28305 int error;
28306 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28307 dev->setkeycode = input_default_setkeycode;
28308
28309 dev_set_name(&dev->dev, "input%ld",
28310 - (unsigned long) atomic_inc_return(&input_no) - 1);
28311 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28312
28313 error = device_add(&dev->dev);
28314 if (error)
28315 diff -urNp linux-3.0.7/drivers/input/joystick/sidewinder.c linux-3.0.7/drivers/input/joystick/sidewinder.c
28316 --- linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
28317 +++ linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
28318 @@ -30,6 +30,7 @@
28319 #include <linux/kernel.h>
28320 #include <linux/module.h>
28321 #include <linux/slab.h>
28322 +#include <linux/sched.h>
28323 #include <linux/init.h>
28324 #include <linux/input.h>
28325 #include <linux/gameport.h>
28326 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28327 unsigned char buf[SW_LENGTH];
28328 int i;
28329
28330 + pax_track_stack();
28331 +
28332 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28333
28334 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28335 diff -urNp linux-3.0.7/drivers/input/joystick/xpad.c linux-3.0.7/drivers/input/joystick/xpad.c
28336 --- linux-3.0.7/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
28337 +++ linux-3.0.7/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
28338 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
28339
28340 static int xpad_led_probe(struct usb_xpad *xpad)
28341 {
28342 - static atomic_t led_seq = ATOMIC_INIT(0);
28343 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28344 long led_no;
28345 struct xpad_led *led;
28346 struct led_classdev *led_cdev;
28347 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
28348 if (!led)
28349 return -ENOMEM;
28350
28351 - led_no = (long)atomic_inc_return(&led_seq) - 1;
28352 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28353
28354 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28355 led->xpad = xpad;
28356 diff -urNp linux-3.0.7/drivers/input/mousedev.c linux-3.0.7/drivers/input/mousedev.c
28357 --- linux-3.0.7/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
28358 +++ linux-3.0.7/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
28359 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28360
28361 spin_unlock_irq(&client->packet_lock);
28362
28363 - if (copy_to_user(buffer, data, count))
28364 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
28365 return -EFAULT;
28366
28367 return count;
28368 diff -urNp linux-3.0.7/drivers/input/serio/serio.c linux-3.0.7/drivers/input/serio/serio.c
28369 --- linux-3.0.7/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
28370 +++ linux-3.0.7/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
28371 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
28372 */
28373 static void serio_init_port(struct serio *serio)
28374 {
28375 - static atomic_t serio_no = ATOMIC_INIT(0);
28376 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28377
28378 __module_get(THIS_MODULE);
28379
28380 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28381 mutex_init(&serio->drv_mutex);
28382 device_initialize(&serio->dev);
28383 dev_set_name(&serio->dev, "serio%ld",
28384 - (long)atomic_inc_return(&serio_no) - 1);
28385 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
28386 serio->dev.bus = &serio_bus;
28387 serio->dev.release = serio_release_port;
28388 serio->dev.groups = serio_device_attr_groups;
28389 diff -urNp linux-3.0.7/drivers/isdn/capi/capi.c linux-3.0.7/drivers/isdn/capi/capi.c
28390 --- linux-3.0.7/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
28391 +++ linux-3.0.7/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
28392 @@ -83,8 +83,8 @@ struct capiminor {
28393
28394 struct capi20_appl *ap;
28395 u32 ncci;
28396 - atomic_t datahandle;
28397 - atomic_t msgid;
28398 + atomic_unchecked_t datahandle;
28399 + atomic_unchecked_t msgid;
28400
28401 struct tty_port port;
28402 int ttyinstop;
28403 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28404 capimsg_setu16(s, 2, mp->ap->applid);
28405 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28406 capimsg_setu8 (s, 5, CAPI_RESP);
28407 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28408 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28409 capimsg_setu32(s, 8, mp->ncci);
28410 capimsg_setu16(s, 12, datahandle);
28411 }
28412 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28413 mp->outbytes -= len;
28414 spin_unlock_bh(&mp->outlock);
28415
28416 - datahandle = atomic_inc_return(&mp->datahandle);
28417 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28418 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28419 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28420 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28421 capimsg_setu16(skb->data, 2, mp->ap->applid);
28422 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28423 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28424 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28425 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28426 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28427 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28428 capimsg_setu16(skb->data, 16, len); /* Data length */
28429 diff -urNp linux-3.0.7/drivers/isdn/gigaset/common.c linux-3.0.7/drivers/isdn/gigaset/common.c
28430 --- linux-3.0.7/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
28431 +++ linux-3.0.7/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
28432 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28433 cs->commands_pending = 0;
28434 cs->cur_at_seq = 0;
28435 cs->gotfwver = -1;
28436 - cs->open_count = 0;
28437 + local_set(&cs->open_count, 0);
28438 cs->dev = NULL;
28439 cs->tty = NULL;
28440 cs->tty_dev = NULL;
28441 diff -urNp linux-3.0.7/drivers/isdn/gigaset/gigaset.h linux-3.0.7/drivers/isdn/gigaset/gigaset.h
28442 --- linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
28443 +++ linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
28444 @@ -35,6 +35,7 @@
28445 #include <linux/tty_driver.h>
28446 #include <linux/list.h>
28447 #include <asm/atomic.h>
28448 +#include <asm/local.h>
28449
28450 #define GIG_VERSION {0, 5, 0, 0}
28451 #define GIG_COMPAT {0, 4, 0, 0}
28452 @@ -433,7 +434,7 @@ struct cardstate {
28453 spinlock_t cmdlock;
28454 unsigned curlen, cmdbytes;
28455
28456 - unsigned open_count;
28457 + local_t open_count;
28458 struct tty_struct *tty;
28459 struct tasklet_struct if_wake_tasklet;
28460 unsigned control_state;
28461 diff -urNp linux-3.0.7/drivers/isdn/gigaset/interface.c linux-3.0.7/drivers/isdn/gigaset/interface.c
28462 --- linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
28463 +++ linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
28464 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28465 }
28466 tty->driver_data = cs;
28467
28468 - ++cs->open_count;
28469 -
28470 - if (cs->open_count == 1) {
28471 + if (local_inc_return(&cs->open_count) == 1) {
28472 spin_lock_irqsave(&cs->lock, flags);
28473 cs->tty = tty;
28474 spin_unlock_irqrestore(&cs->lock, flags);
28475 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28476
28477 if (!cs->connected)
28478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28479 - else if (!cs->open_count)
28480 + else if (!local_read(&cs->open_count))
28481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28482 else {
28483 - if (!--cs->open_count) {
28484 + if (!local_dec_return(&cs->open_count)) {
28485 spin_lock_irqsave(&cs->lock, flags);
28486 cs->tty = NULL;
28487 spin_unlock_irqrestore(&cs->lock, flags);
28488 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28489 if (!cs->connected) {
28490 gig_dbg(DEBUG_IF, "not connected");
28491 retval = -ENODEV;
28492 - } else if (!cs->open_count)
28493 + } else if (!local_read(&cs->open_count))
28494 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28495 else {
28496 retval = 0;
28497 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
28498 retval = -ENODEV;
28499 goto done;
28500 }
28501 - if (!cs->open_count) {
28502 + if (!local_read(&cs->open_count)) {
28503 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28504 retval = -ENODEV;
28505 goto done;
28506 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
28507 if (!cs->connected) {
28508 gig_dbg(DEBUG_IF, "not connected");
28509 retval = -ENODEV;
28510 - } else if (!cs->open_count)
28511 + } else if (!local_read(&cs->open_count))
28512 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28513 else if (cs->mstate != MS_LOCKED) {
28514 dev_warn(cs->dev, "can't write to unlocked device\n");
28515 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
28516
28517 if (!cs->connected)
28518 gig_dbg(DEBUG_IF, "not connected");
28519 - else if (!cs->open_count)
28520 + else if (!local_read(&cs->open_count))
28521 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28522 else if (cs->mstate != MS_LOCKED)
28523 dev_warn(cs->dev, "can't write to unlocked device\n");
28524 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
28525
28526 if (!cs->connected)
28527 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28528 - else if (!cs->open_count)
28529 + else if (!local_read(&cs->open_count))
28530 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28531 else
28532 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28533 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
28534
28535 if (!cs->connected)
28536 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28537 - else if (!cs->open_count)
28538 + else if (!local_read(&cs->open_count))
28539 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28540 else
28541 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28542 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
28543 goto out;
28544 }
28545
28546 - if (!cs->open_count) {
28547 + if (!local_read(&cs->open_count)) {
28548 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28549 goto out;
28550 }
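In the gigaset hunks above the tty open count moves from a plain unsigned to local_t (asm/local.h): the ++/-- sites become local_inc_return()/local_dec_return() and reads go through local_read(), so concurrent open/close paths update the counter atomically instead of via non-atomic integer arithmetic. A minimal kernel-context sketch of that pattern, with hypothetical names not taken from the patch:

#include <asm/local.h>

static local_t sketch_open_count = LOCAL_INIT(0);

static int sketch_open(void)
{
	/* atomic increment; returns the new count */
	if (local_inc_return(&sketch_open_count) == 1) {
		/* first opener: bind the device state here */
	}
	return 0;
}

static void sketch_close(void)
{
	/* atomic decrement; tear down only on the last close */
	if (!local_dec_return(&sketch_open_count)) {
		/* last closer: release the device state here */
	}
}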
28551 diff -urNp linux-3.0.7/drivers/isdn/hardware/avm/b1.c linux-3.0.7/drivers/isdn/hardware/avm/b1.c
28552 --- linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
28553 +++ linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
28554 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
28555 }
28556 if (left) {
28557 if (t4file->user) {
28558 - if (copy_from_user(buf, dp, left))
28559 + if (left > sizeof buf || copy_from_user(buf, dp, left))
28560 return -EFAULT;
28561 } else {
28562 memcpy(buf, dp, left);
28563 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
28564 }
28565 if (left) {
28566 if (config->user) {
28567 - if (copy_from_user(buf, dp, left))
28568 + if (left > sizeof buf || copy_from_user(buf, dp, left))
28569 return -EFAULT;
28570 } else {
28571 memcpy(buf, dp, left);
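Both b1.c hunks above apply the same hardening pattern: before copy_from_user() fills a fixed on-stack buffer, the requested length is checked against the buffer size, so an oversized `left` fails with -EFAULT instead of overflowing the stack. A minimal kernel-context sketch of the pattern (hypothetical function, not from the patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int sketch_load_chunk(const char __user *dp, size_t left)
{
	unsigned char buf[256];	/* fixed-size kernel buffer */

	/* refuse anything larger than the destination, then copy */
	if (left > sizeof(buf) || copy_from_user(buf, dp, left))
		return -EFAULT;

	/* ... hand buf to the hardware ... */
	return 0;
}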
28572 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c
28573 --- linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
28574 +++ linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
28575 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
28576 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
28577 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
28578
28579 + pax_track_stack();
28580
28581 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
28582 {
28583 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c
28584 --- linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
28585 +++ linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
28586 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
28587 IDI_SYNC_REQ req;
28588 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28589
28590 + pax_track_stack();
28591 +
28592 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28593
28594 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28595 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c
28596 --- linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
28597 +++ linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
28598 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
28599 IDI_SYNC_REQ req;
28600 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28601
28602 + pax_track_stack();
28603 +
28604 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28605
28606 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28607 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c
28608 --- linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
28609 +++ linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
28610 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
28611 IDI_SYNC_REQ req;
28612 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28613
28614 + pax_track_stack();
28615 +
28616 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28617
28618 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28619 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h
28620 --- linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
28621 +++ linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
28622 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
28623 } diva_didd_add_adapter_t;
28624 typedef struct _diva_didd_remove_adapter {
28625 IDI_CALL p_request;
28626 -} diva_didd_remove_adapter_t;
28627 +} __no_const diva_didd_remove_adapter_t;
28628 typedef struct _diva_didd_read_adapter_array {
28629 void * buffer;
28630 dword length;
28631 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c
28632 --- linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
28633 +++ linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
28634 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
28635 IDI_SYNC_REQ req;
28636 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28637
28638 + pax_track_stack();
28639 +
28640 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28641
28642 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28643 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/message.c linux-3.0.7/drivers/isdn/hardware/eicon/message.c
28644 --- linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
28645 +++ linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
28646 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
28647 dword d;
28648 word w;
28649
28650 + pax_track_stack();
28651 +
28652 a = plci->adapter;
28653 Id = ((word)plci->Id<<8)|a->Id;
28654 PUT_WORD(&SS_Ind[4],0x0000);
28655 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
28656 word j, n, w;
28657 dword d;
28658
28659 + pax_track_stack();
28660 +
28661
28662 for(i=0;i<8;i++) bp_parms[i].length = 0;
28663 for(i=0;i<2;i++) global_config[i].length = 0;
28664 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
28665 const byte llc3[] = {4,3,2,2,6,6,0};
28666 const byte header[] = {0,2,3,3,0,0,0};
28667
28668 + pax_track_stack();
28669 +
28670 for(i=0;i<8;i++) bp_parms[i].length = 0;
28671 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
28672 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
28673 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
28674 word appl_number_group_type[MAX_APPL];
28675 PLCI *auxplci;
28676
28677 + pax_track_stack();
28678 +
28679 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
28680
28681 if(!a->group_optimization_enabled)
28682 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c
28683 --- linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
28684 +++ linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
28685 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
28686 IDI_SYNC_REQ req;
28687 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28688
28689 + pax_track_stack();
28690 +
28691 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28692
28693 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28694 diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h
28695 --- linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
28696 +++ linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
28697 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
28698 typedef struct _diva_os_idi_adapter_interface {
28699 diva_init_card_proc_t cleanup_adapter_proc;
28700 diva_cmd_card_proc_t cmd_proc;
28701 -} diva_os_idi_adapter_interface_t;
28702 +} __no_const diva_os_idi_adapter_interface_t;
28703
28704 typedef struct _diva_os_xdi_adapter {
28705 struct list_head link;
28706 diff -urNp linux-3.0.7/drivers/isdn/i4l/isdn_common.c linux-3.0.7/drivers/isdn/i4l/isdn_common.c
28707 --- linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
28708 +++ linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
28709 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
28710 } iocpar;
28711 void __user *argp = (void __user *)arg;
28712
28713 + pax_track_stack();
28714 +
28715 #define name iocpar.name
28716 #define bname iocpar.bname
28717 #define iocts iocpar.iocts
28718 diff -urNp linux-3.0.7/drivers/isdn/icn/icn.c linux-3.0.7/drivers/isdn/icn/icn.c
28719 --- linux-3.0.7/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
28720 +++ linux-3.0.7/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
28721 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
28722 if (count > len)
28723 count = len;
28724 if (user) {
28725 - if (copy_from_user(msg, buf, count))
28726 + if (count > sizeof msg || copy_from_user(msg, buf, count))
28727 return -EFAULT;
28728 } else
28729 memcpy(msg, buf, count);
28730 diff -urNp linux-3.0.7/drivers/lguest/core.c linux-3.0.7/drivers/lguest/core.c
28731 --- linux-3.0.7/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
28732 +++ linux-3.0.7/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
28733 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
28734 * it's worked so far. The end address needs +1 because __get_vm_area
28735 * allocates an extra guard page, so we need space for that.
28736 */
28737 +
28738 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28739 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28740 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
28741 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28742 +#else
28743 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28744 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
28745 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28746 +#endif
28747 +
28748 if (!switcher_vma) {
28749 err = -ENOMEM;
28750 printk("lguest: could not map switcher pages high\n");
28751 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
28752 * Now the Switcher is mapped at the right address, we can't fail!
28753 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
28754 */
28755 - memcpy(switcher_vma->addr, start_switcher_text,
28756 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
28757 end_switcher_text - start_switcher_text);
28758
28759 printk(KERN_INFO "lguest: mapped switcher at %p\n",
28760 diff -urNp linux-3.0.7/drivers/lguest/x86/core.c linux-3.0.7/drivers/lguest/x86/core.c
28761 --- linux-3.0.7/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
28762 +++ linux-3.0.7/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
28763 @@ -59,7 +59,7 @@ static struct {
28764 /* Offset from where switcher.S was compiled to where we've copied it */
28765 static unsigned long switcher_offset(void)
28766 {
28767 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
28768 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
28769 }
28770
28771 /* This cpu's struct lguest_pages. */
28772 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
28773 * These copies are pretty cheap, so we do them unconditionally: */
28774 /* Save the current Host top-level page directory.
28775 */
28776 +
28777 +#ifdef CONFIG_PAX_PER_CPU_PGD
28778 + pages->state.host_cr3 = read_cr3();
28779 +#else
28780 pages->state.host_cr3 = __pa(current->mm->pgd);
28781 +#endif
28782 +
28783 /*
28784 * Set up the Guest's page tables to see this CPU's pages (and no
28785 * other CPU's pages).
28786 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
28787 * compiled-in switcher code and the high-mapped copy we just made.
28788 */
28789 for (i = 0; i < IDT_ENTRIES; i++)
28790 - default_idt_entries[i] += switcher_offset();
28791 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
28792
28793 /*
28794 * Set up the Switcher's per-cpu areas.
28795 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
28796 * it will be undisturbed when we switch. To change %cs and jump we
28797 * need this structure to feed to Intel's "lcall" instruction.
28798 */
28799 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
28800 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
28801 lguest_entry.segment = LGUEST_CS;
28802
28803 /*
28804 diff -urNp linux-3.0.7/drivers/lguest/x86/switcher_32.S linux-3.0.7/drivers/lguest/x86/switcher_32.S
28805 --- linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
28806 +++ linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
28807 @@ -87,6 +87,7 @@
28808 #include <asm/page.h>
28809 #include <asm/segment.h>
28810 #include <asm/lguest.h>
28811 +#include <asm/processor-flags.h>
28812
28813 // We mark the start of the code to copy
28814 // It's placed in .text tho it's never run here
28815 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
28816 // Changes type when we load it: damn Intel!
28817 // For after we switch over our page tables
28818 // That entry will be read-only: we'd crash.
28819 +
28820 +#ifdef CONFIG_PAX_KERNEXEC
28821 + mov %cr0, %edx
28822 + xor $X86_CR0_WP, %edx
28823 + mov %edx, %cr0
28824 +#endif
28825 +
28826 movl $(GDT_ENTRY_TSS*8), %edx
28827 ltr %dx
28828
28829 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
28830 // Let's clear it again for our return.
28831 // The GDT descriptor of the Host
28832 // Points to the table after two "size" bytes
28833 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
28834 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
28835 // Clear "used" from type field (byte 5, bit 2)
28836 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
28837 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
28838 +
28839 +#ifdef CONFIG_PAX_KERNEXEC
28840 + mov %cr0, %eax
28841 + xor $X86_CR0_WP, %eax
28842 + mov %eax, %cr0
28843 +#endif
28844
28845 // Once our page table's switched, the Guest is live!
28846 // The Host fades as we run this final step.
28847 @@ -295,13 +309,12 @@ deliver_to_host:
28848 // I consulted gcc, and it gave
28849 // These instructions, which I gladly credit:
28850 leal (%edx,%ebx,8), %eax
28851 - movzwl (%eax),%edx
28852 - movl 4(%eax), %eax
28853 - xorw %ax, %ax
28854 - orl %eax, %edx
28855 + movl 4(%eax), %edx
28856 + movw (%eax), %dx
28857 // Now the address of the handler's in %edx
28858 // We call it now: its "iret" drops us home.
28859 - jmp *%edx
28860 + ljmp $__KERNEL_CS, $1f
28861 +1: jmp *%edx
28862
28863 // Every interrupt can come to us here
28864 // But we must truly tell each apart.
28865 diff -urNp linux-3.0.7/drivers/macintosh/macio_asic.c linux-3.0.7/drivers/macintosh/macio_asic.c
28866 --- linux-3.0.7/drivers/macintosh/macio_asic.c 2011-07-21 22:17:23.000000000 -0400
28867 +++ linux-3.0.7/drivers/macintosh/macio_asic.c 2011-10-11 10:44:33.000000000 -0400
28868 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
28869 * MacIO is matched against any Apple ID, it's probe() function
28870 * will then decide wether it applies or not
28871 */
28872 -static const struct pci_device_id __devinitdata pci_ids [] = { {
28873 +static const struct pci_device_id __devinitconst pci_ids [] = { {
28874 .vendor = PCI_VENDOR_ID_APPLE,
28875 .device = PCI_ANY_ID,
28876 .subvendor = PCI_ANY_ID,
28877 diff -urNp linux-3.0.7/drivers/md/dm.c linux-3.0.7/drivers/md/dm.c
28878 --- linux-3.0.7/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
28879 +++ linux-3.0.7/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
28880 @@ -164,9 +164,9 @@ struct mapped_device {
28881 /*
28882 * Event handling.
28883 */
28884 - atomic_t event_nr;
28885 + atomic_unchecked_t event_nr;
28886 wait_queue_head_t eventq;
28887 - atomic_t uevent_seq;
28888 + atomic_unchecked_t uevent_seq;
28889 struct list_head uevent_list;
28890 spinlock_t uevent_lock; /* Protect access to uevent_list */
28891
28892 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
28893 rwlock_init(&md->map_lock);
28894 atomic_set(&md->holders, 1);
28895 atomic_set(&md->open_count, 0);
28896 - atomic_set(&md->event_nr, 0);
28897 - atomic_set(&md->uevent_seq, 0);
28898 + atomic_set_unchecked(&md->event_nr, 0);
28899 + atomic_set_unchecked(&md->uevent_seq, 0);
28900 INIT_LIST_HEAD(&md->uevent_list);
28901 spin_lock_init(&md->uevent_lock);
28902
28903 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
28904
28905 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
28906
28907 - atomic_inc(&md->event_nr);
28908 + atomic_inc_unchecked(&md->event_nr);
28909 wake_up(&md->eventq);
28910 }
28911
28912 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
28913
28914 uint32_t dm_next_uevent_seq(struct mapped_device *md)
28915 {
28916 - return atomic_add_return(1, &md->uevent_seq);
28917 + return atomic_add_return_unchecked(1, &md->uevent_seq);
28918 }
28919
28920 uint32_t dm_get_event_nr(struct mapped_device *md)
28921 {
28922 - return atomic_read(&md->event_nr);
28923 + return atomic_read_unchecked(&md->event_nr);
28924 }
28925
28926 int dm_wait_event(struct mapped_device *md, int event_nr)
28927 {
28928 return wait_event_interruptible(md->eventq,
28929 - (event_nr != atomic_read(&md->event_nr)));
28930 + (event_nr != atomic_read_unchecked(&md->event_nr)));
28931 }
28932
28933 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28934 diff -urNp linux-3.0.7/drivers/md/dm-ioctl.c linux-3.0.7/drivers/md/dm-ioctl.c
28935 --- linux-3.0.7/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
28936 +++ linux-3.0.7/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
28937 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
28938 cmd == DM_LIST_VERSIONS_CMD)
28939 return 0;
28940
28941 - if ((cmd == DM_DEV_CREATE_CMD)) {
28942 + if (cmd == DM_DEV_CREATE_CMD) {
28943 if (!*param->name) {
28944 DMWARN("name not supplied when creating device");
28945 return -EINVAL;
28946 diff -urNp linux-3.0.7/drivers/md/dm-raid1.c linux-3.0.7/drivers/md/dm-raid1.c
28947 --- linux-3.0.7/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
28948 +++ linux-3.0.7/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
28949 @@ -40,7 +40,7 @@ enum dm_raid1_error {
28950
28951 struct mirror {
28952 struct mirror_set *ms;
28953 - atomic_t error_count;
28954 + atomic_unchecked_t error_count;
28955 unsigned long error_type;
28956 struct dm_dev *dev;
28957 sector_t offset;
28958 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
28959 struct mirror *m;
28960
28961 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
28962 - if (!atomic_read(&m->error_count))
28963 + if (!atomic_read_unchecked(&m->error_count))
28964 return m;
28965
28966 return NULL;
28967 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
28968 * simple way to tell if a device has encountered
28969 * errors.
28970 */
28971 - atomic_inc(&m->error_count);
28972 + atomic_inc_unchecked(&m->error_count);
28973
28974 if (test_and_set_bit(error_type, &m->error_type))
28975 return;
28976 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
28977 struct mirror *m = get_default_mirror(ms);
28978
28979 do {
28980 - if (likely(!atomic_read(&m->error_count)))
28981 + if (likely(!atomic_read_unchecked(&m->error_count)))
28982 return m;
28983
28984 if (m-- == ms->mirror)
28985 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
28986 {
28987 struct mirror *default_mirror = get_default_mirror(m->ms);
28988
28989 - return !atomic_read(&default_mirror->error_count);
28990 + return !atomic_read_unchecked(&default_mirror->error_count);
28991 }
28992
28993 static int mirror_available(struct mirror_set *ms, struct bio *bio)
28994 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
28995 */
28996 if (likely(region_in_sync(ms, region, 1)))
28997 m = choose_mirror(ms, bio->bi_sector);
28998 - else if (m && atomic_read(&m->error_count))
28999 + else if (m && atomic_read_unchecked(&m->error_count))
29000 m = NULL;
29001
29002 if (likely(m))
29003 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29004 }
29005
29006 ms->mirror[mirror].ms = ms;
29007 - atomic_set(&(ms->mirror[mirror].error_count), 0);
29008 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29009 ms->mirror[mirror].error_type = 0;
29010 ms->mirror[mirror].offset = offset;
29011
29012 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29013 */
29014 static char device_status_char(struct mirror *m)
29015 {
29016 - if (!atomic_read(&(m->error_count)))
29017 + if (!atomic_read_unchecked(&(m->error_count)))
29018 return 'A';
29019
29020 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29021 diff -urNp linux-3.0.7/drivers/md/dm-stripe.c linux-3.0.7/drivers/md/dm-stripe.c
29022 --- linux-3.0.7/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
29023 +++ linux-3.0.7/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
29024 @@ -20,7 +20,7 @@ struct stripe {
29025 struct dm_dev *dev;
29026 sector_t physical_start;
29027
29028 - atomic_t error_count;
29029 + atomic_unchecked_t error_count;
29030 };
29031
29032 struct stripe_c {
29033 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29034 kfree(sc);
29035 return r;
29036 }
29037 - atomic_set(&(sc->stripe[i].error_count), 0);
29038 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29039 }
29040
29041 ti->private = sc;
29042 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29043 DMEMIT("%d ", sc->stripes);
29044 for (i = 0; i < sc->stripes; i++) {
29045 DMEMIT("%s ", sc->stripe[i].dev->name);
29046 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29047 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29048 'D' : 'A';
29049 }
29050 buffer[i] = '\0';
29051 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29052 */
29053 for (i = 0; i < sc->stripes; i++)
29054 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29055 - atomic_inc(&(sc->stripe[i].error_count));
29056 - if (atomic_read(&(sc->stripe[i].error_count)) <
29057 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
29058 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29059 DM_IO_ERROR_THRESHOLD)
29060 schedule_work(&sc->trigger_event);
29061 }
29062 diff -urNp linux-3.0.7/drivers/md/dm-table.c linux-3.0.7/drivers/md/dm-table.c
29063 --- linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:09.000000000 -0400
29064 +++ linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:19.000000000 -0400
29065 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
29066 if (!dev_size)
29067 return 0;
29068
29069 - if ((start >= dev_size) || (start + len > dev_size)) {
29070 + if ((start >= dev_size) || (len > dev_size - start)) {
29071 DMWARN("%s: %s too small for target: "
29072 "start=%llu, len=%llu, dev_size=%llu",
29073 dm_device_name(ti->table->md), bdevname(bdev, b),
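The dm-table.c hunk above rewrites the range check so it cannot be defeated by integer overflow: `start + len > dev_size` can wrap when start and len are both large, whereas `len > dev_size - start` is only evaluated after `start >= dev_size` has been ruled out, so the subtraction cannot underflow. A standalone restatement of the rewritten condition:

#include <stdbool.h>
#include <stdint.h>

/* Overflow-safe "does [start, start+len) fit inside dev_size?",
 * mirroring the condition used in the hunk above. */
static bool range_is_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
{
	if (start >= dev_size)		/* start itself is out of range */
		return true;
	return len > dev_size - start;	/* no wrap: start < dev_size here */
}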
29074 diff -urNp linux-3.0.7/drivers/md/md.c linux-3.0.7/drivers/md/md.c
29075 --- linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:09.000000000 -0400
29076 +++ linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:19.000000000 -0400
29077 @@ -231,10 +231,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
29078 * start build, activate spare
29079 */
29080 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29081 -static atomic_t md_event_count;
29082 +static atomic_unchecked_t md_event_count;
29083 void md_new_event(mddev_t *mddev)
29084 {
29085 - atomic_inc(&md_event_count);
29086 + atomic_inc_unchecked(&md_event_count);
29087 wake_up(&md_event_waiters);
29088 }
29089 EXPORT_SYMBOL_GPL(md_new_event);
29090 @@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29091 */
29092 static void md_new_event_inintr(mddev_t *mddev)
29093 {
29094 - atomic_inc(&md_event_count);
29095 + atomic_inc_unchecked(&md_event_count);
29096 wake_up(&md_event_waiters);
29097 }
29098
29099 @@ -1475,7 +1475,7 @@ static int super_1_load(mdk_rdev_t *rdev
29100
29101 rdev->preferred_minor = 0xffff;
29102 rdev->data_offset = le64_to_cpu(sb->data_offset);
29103 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29104 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29105
29106 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29107 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29108 @@ -1653,7 +1653,7 @@ static void super_1_sync(mddev_t *mddev,
29109 else
29110 sb->resync_offset = cpu_to_le64(0);
29111
29112 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29113 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29114
29115 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29116 sb->size = cpu_to_le64(mddev->dev_sectors);
29117 @@ -2446,7 +2446,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29118 static ssize_t
29119 errors_show(mdk_rdev_t *rdev, char *page)
29120 {
29121 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29122 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29123 }
29124
29125 static ssize_t
29126 @@ -2455,7 +2455,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29127 char *e;
29128 unsigned long n = simple_strtoul(buf, &e, 10);
29129 if (*buf && (*e == 0 || *e == '\n')) {
29130 - atomic_set(&rdev->corrected_errors, n);
29131 + atomic_set_unchecked(&rdev->corrected_errors, n);
29132 return len;
29133 }
29134 return -EINVAL;
29135 @@ -2811,8 +2811,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
29136 rdev->last_read_error.tv_sec = 0;
29137 rdev->last_read_error.tv_nsec = 0;
29138 atomic_set(&rdev->nr_pending, 0);
29139 - atomic_set(&rdev->read_errors, 0);
29140 - atomic_set(&rdev->corrected_errors, 0);
29141 + atomic_set_unchecked(&rdev->read_errors, 0);
29142 + atomic_set_unchecked(&rdev->corrected_errors, 0);
29143
29144 INIT_LIST_HEAD(&rdev->same_set);
29145 init_waitqueue_head(&rdev->blocked_wait);
29146 @@ -6440,7 +6440,7 @@ static int md_seq_show(struct seq_file *
29147
29148 spin_unlock(&pers_lock);
29149 seq_printf(seq, "\n");
29150 - mi->event = atomic_read(&md_event_count);
29151 + mi->event = atomic_read_unchecked(&md_event_count);
29152 return 0;
29153 }
29154 if (v == (void*)2) {
29155 @@ -6529,7 +6529,7 @@ static int md_seq_show(struct seq_file *
29156 chunk_kb ? "KB" : "B");
29157 if (bitmap->file) {
29158 seq_printf(seq, ", file: ");
29159 - seq_path(seq, &bitmap->file->f_path, " \t\n");
29160 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29161 }
29162
29163 seq_printf(seq, "\n");
29164 @@ -6563,7 +6563,7 @@ static int md_seq_open(struct inode *ino
29165 else {
29166 struct seq_file *p = file->private_data;
29167 p->private = mi;
29168 - mi->event = atomic_read(&md_event_count);
29169 + mi->event = atomic_read_unchecked(&md_event_count);
29170 }
29171 return error;
29172 }
29173 @@ -6579,7 +6579,7 @@ static unsigned int mdstat_poll(struct f
29174 /* always allow read */
29175 mask = POLLIN | POLLRDNORM;
29176
29177 - if (mi->event != atomic_read(&md_event_count))
29178 + if (mi->event != atomic_read_unchecked(&md_event_count))
29179 mask |= POLLERR | POLLPRI;
29180 return mask;
29181 }
29182 @@ -6623,7 +6623,7 @@ static int is_mddev_idle(mddev_t *mddev,
29183 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29184 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29185 (int)part_stat_read(&disk->part0, sectors[1]) -
29186 - atomic_read(&disk->sync_io);
29187 + atomic_read_unchecked(&disk->sync_io);
29188 /* sync IO will cause sync_io to increase before the disk_stats
29189 * as sync_io is counted when a request starts, and
29190 * disk_stats is counted when it completes.
29191 diff -urNp linux-3.0.7/drivers/md/md.h linux-3.0.7/drivers/md/md.h
29192 --- linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:09.000000000 -0400
29193 +++ linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:19.000000000 -0400
29194 @@ -97,13 +97,13 @@ struct mdk_rdev_s
29195 * only maintained for arrays that
29196 * support hot removal
29197 */
29198 - atomic_t read_errors; /* number of consecutive read errors that
29199 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
29200 * we have tried to ignore.
29201 */
29202 struct timespec last_read_error; /* monotonic time since our
29203 * last read error
29204 */
29205 - atomic_t corrected_errors; /* number of corrected read errors,
29206 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29207 * for reporting to userspace and storing
29208 * in superblock.
29209 */
29210 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
29211
29212 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29213 {
29214 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29215 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29216 }
29217
29218 struct mdk_personality
29219 diff -urNp linux-3.0.7/drivers/md/raid10.c linux-3.0.7/drivers/md/raid10.c
29220 --- linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:09.000000000 -0400
29221 +++ linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:19.000000000 -0400
29222 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
29223 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
29224 set_bit(R10BIO_Uptodate, &r10_bio->state);
29225 else {
29226 - atomic_add(r10_bio->sectors,
29227 + atomic_add_unchecked(r10_bio->sectors,
29228 &conf->mirrors[d].rdev->corrected_errors);
29229 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
29230 md_error(r10_bio->mddev,
29231 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
29232 {
29233 struct timespec cur_time_mon;
29234 unsigned long hours_since_last;
29235 - unsigned int read_errors = atomic_read(&rdev->read_errors);
29236 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29237
29238 ktime_get_ts(&cur_time_mon);
29239
29240 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
29241 * overflowing the shift of read_errors by hours_since_last.
29242 */
29243 if (hours_since_last >= 8 * sizeof(read_errors))
29244 - atomic_set(&rdev->read_errors, 0);
29245 + atomic_set_unchecked(&rdev->read_errors, 0);
29246 else
29247 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29248 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29249 }
29250
29251 /*
29252 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
29253 return;
29254
29255 check_decay_read_errors(mddev, rdev);
29256 - atomic_inc(&rdev->read_errors);
29257 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
29258 + atomic_inc_unchecked(&rdev->read_errors);
29259 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29260 char b[BDEVNAME_SIZE];
29261 bdevname(rdev->bdev, b);
29262
29263 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
29264 "md/raid10:%s: %s: Raid device exceeded "
29265 "read_error threshold [cur %d:max %d]\n",
29266 mdname(mddev), b,
29267 - atomic_read(&rdev->read_errors), max_read_errors);
29268 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29269 printk(KERN_NOTICE
29270 "md/raid10:%s: %s: Failing raid device\n",
29271 mdname(mddev), b);
29272 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
29273 test_bit(In_sync, &rdev->flags)) {
29274 atomic_inc(&rdev->nr_pending);
29275 rcu_read_unlock();
29276 - atomic_add(s, &rdev->corrected_errors);
29277 + atomic_add_unchecked(s, &rdev->corrected_errors);
29278 if (sync_page_io(rdev,
29279 r10_bio->devs[sl].addr +
29280 sect,
29281 diff -urNp linux-3.0.7/drivers/md/raid1.c linux-3.0.7/drivers/md/raid1.c
29282 --- linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:09.000000000 -0400
29283 +++ linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:19.000000000 -0400
29284 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
29285 rdev_dec_pending(rdev, mddev);
29286 md_error(mddev, rdev);
29287 } else
29288 - atomic_add(s, &rdev->corrected_errors);
29289 + atomic_add_unchecked(s, &rdev->corrected_errors);
29290 }
29291 d = start;
29292 while (d != r1_bio->read_disk) {
29293 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
29294 /* Well, this device is dead */
29295 md_error(mddev, rdev);
29296 else {
29297 - atomic_add(s, &rdev->corrected_errors);
29298 + atomic_add_unchecked(s, &rdev->corrected_errors);
29299 printk(KERN_INFO
29300 "md/raid1:%s: read error corrected "
29301 "(%d sectors at %llu on %s)\n",
29302 diff -urNp linux-3.0.7/drivers/md/raid5.c linux-3.0.7/drivers/md/raid5.c
29303 --- linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:09.000000000 -0400
29304 +++ linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:19.000000000 -0400
29305 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
29306 bi->bi_next = NULL;
29307 if ((rw & WRITE) &&
29308 test_bit(R5_ReWrite, &sh->dev[i].flags))
29309 - atomic_add(STRIPE_SECTORS,
29310 + atomic_add_unchecked(STRIPE_SECTORS,
29311 &rdev->corrected_errors);
29312 generic_make_request(bi);
29313 } else {
29314 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
29315 clear_bit(R5_ReadError, &sh->dev[i].flags);
29316 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29317 }
29318 - if (atomic_read(&conf->disks[i].rdev->read_errors))
29319 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
29320 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29321 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29322 } else {
29323 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29324 int retry = 0;
29325 rdev = conf->disks[i].rdev;
29326
29327 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29328 - atomic_inc(&rdev->read_errors);
29329 + atomic_inc_unchecked(&rdev->read_errors);
29330 if (conf->mddev->degraded >= conf->max_degraded)
29331 printk_rl(KERN_WARNING
29332 "md/raid:%s: read error not correctable "
29333 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
29334 (unsigned long long)(sh->sector
29335 + rdev->data_offset),
29336 bdn);
29337 - else if (atomic_read(&rdev->read_errors)
29338 + else if (atomic_read_unchecked(&rdev->read_errors)
29339 > conf->max_nr_stripes)
29340 printk(KERN_WARNING
29341 "md/raid:%s: Too many read errors, failing device %s.\n",
29342 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
29343 sector_t r_sector;
29344 struct stripe_head sh2;
29345
29346 + pax_track_stack();
29347
29348 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29349 stripe = new_sector;
29350 diff -urNp linux-3.0.7/drivers/media/common/saa7146_hlp.c linux-3.0.7/drivers/media/common/saa7146_hlp.c
29351 --- linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
29352 +++ linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
29353 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
29354
29355 int x[32], y[32], w[32], h[32];
29356
29357 + pax_track_stack();
29358 +
29359 /* clear out memory */
29360 memset(&line_list[0], 0x00, sizeof(u32)*32);
29361 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29362 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29363 --- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
29364 +++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
29365 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29366 u8 buf[HOST_LINK_BUF_SIZE];
29367 int i;
29368
29369 + pax_track_stack();
29370 +
29371 dprintk("%s\n", __func__);
29372
29373 /* check if we have space for a link buf in the rx_buffer */
29374 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29375 unsigned long timeout;
29376 int written;
29377
29378 + pax_track_stack();
29379 +
29380 dprintk("%s\n", __func__);
29381
29382 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29383 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h
29384 --- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
29385 +++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
29386 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
29387 union {
29388 struct dmx_ts_feed ts;
29389 struct dmx_section_feed sec;
29390 - } feed;
29391 + } __no_const feed;
29392
29393 union {
29394 dmx_ts_cb ts;
29395 dmx_section_cb sec;
29396 - } cb;
29397 + } __no_const cb;
29398
29399 struct dvb_demux *demux;
29400 void *priv;
29401 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c
29402 --- linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
29403 +++ linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
29404 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29405 const struct dvb_device *template, void *priv, int type)
29406 {
29407 struct dvb_device *dvbdev;
29408 - struct file_operations *dvbdevfops;
29409 + file_operations_no_const *dvbdevfops;
29410 struct device *clsdev;
29411 int minor;
29412 int id;
29413 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c
29414 --- linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
29415 +++ linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
29416 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29417 struct dib0700_adapter_state {
29418 int (*set_param_save) (struct dvb_frontend *,
29419 struct dvb_frontend_parameters *);
29420 -};
29421 +} __no_const;
29422
29423 static int dib7070_set_param_override(struct dvb_frontend *fe,
29424 struct dvb_frontend_parameters *fep)
29425 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c
29426 --- linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
29427 +++ linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
29428 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
29429 if (!buf)
29430 return -ENOMEM;
29431
29432 + pax_track_stack();
29433 +
29434 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29435 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29436 hx.addr, hx.len, hx.chk);
29437 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h
29438 --- linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
29439 +++ linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
29440 @@ -97,7 +97,7 @@
29441 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
29442
29443 struct dibusb_state {
29444 - struct dib_fe_xfer_ops ops;
29445 + dib_fe_xfer_ops_no_const ops;
29446 int mt2060_present;
29447 u8 tuner_addr;
29448 };
29449 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c
29450 --- linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
29451 +++ linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
29452 @@ -95,7 +95,7 @@ struct su3000_state {
29453
29454 struct s6x0_state {
29455 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29456 -};
29457 +} __no_const;
29458
29459 /* debug */
29460 static int dvb_usb_dw2102_debug;
29461 diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c
29462 --- linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
29463 +++ linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
29464 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29465 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29466 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29467
29468 + pax_track_stack();
29469
29470 data[0] = 0x8a;
29471 len_in = 1;
29472 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29473 int ret = 0, len_in;
29474 u8 data[512] = {0};
29475
29476 + pax_track_stack();
29477 +
29478 data[0] = 0x0a;
29479 len_in = 1;
29480 info("FRM Firmware Cold Reset");
29481 diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000.h linux-3.0.7/drivers/media/dvb/frontends/dib3000.h
29482 --- linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
29483 +++ linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
29484 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
29485 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
29486 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
29487 };
29488 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
29489
29490 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
29491 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29492 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
29493 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
29494 #else
29495 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29496 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29497 diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c
29498 --- linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
29499 +++ linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
29500 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
29501 static struct dvb_frontend_ops dib3000mb_ops;
29502
29503 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29504 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29505 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
29506 {
29507 struct dib3000_state* state = NULL;
29508
29509 diff -urNp linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c
29510 --- linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
29511 +++ linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
29512 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
29513 int ret = -1;
29514 int sync;
29515
29516 + pax_track_stack();
29517 +
29518 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
29519
29520 fcp = 3000;
29521 diff -urNp linux-3.0.7/drivers/media/dvb/frontends/or51211.c linux-3.0.7/drivers/media/dvb/frontends/or51211.c
29522 --- linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
29523 +++ linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
29524 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
29525 u8 tudata[585];
29526 int i;
29527
29528 + pax_track_stack();
29529 +
29530 dprintk("Firmware is %zd bytes\n",fw->size);
29531
29532 /* Get eprom data */
29533 diff -urNp linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c
29534 --- linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-07-21 22:17:23.000000000 -0400
29535 +++ linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-10-11 10:44:33.000000000 -0400
29536 @@ -379,7 +379,7 @@ static struct ngene_info ngene_info_m780
29537
29538 /****************************************************************************/
29539
29540 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
29541 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
29542 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
29543 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
29544 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
29545 diff -urNp linux-3.0.7/drivers/media/video/cx18/cx18-driver.c linux-3.0.7/drivers/media/video/cx18/cx18-driver.c
29546 --- linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
29547 +++ linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
29548 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
29549 struct i2c_client c;
29550 u8 eedata[256];
29551
29552 + pax_track_stack();
29553 +
29554 memset(&c, 0, sizeof(c));
29555 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
29556 c.adapter = &cx->i2c_adap[0];
29557 diff -urNp linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c
29558 --- linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
29559 +++ linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
29560 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
29561 bool handle = false;
29562 struct ir_raw_event ir_core_event[64];
29563
29564 + pax_track_stack();
29565 +
29566 do {
29567 num = 0;
29568 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
29569 diff -urNp linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c
29570 --- linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-07-21 22:17:23.000000000 -0400
29571 +++ linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-10-11 10:44:33.000000000 -0400
29572 @@ -764,7 +764,7 @@ static struct snd_kcontrol_new snd_cx88_
29573 * Only boards with eeprom and byte 1 at eeprom=1 have it
29574 */
29575
29576 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
29577 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
29578 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29579 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29580 {0, }
29581 diff -urNp linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
29582 --- linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
29583 +++ linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
29584 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
29585 u8 *eeprom;
29586 struct tveeprom tvdata;
29587
29588 + pax_track_stack();
29589 +
29590 memset(&tvdata,0,sizeof(tvdata));
29591
29592 eeprom = pvr2_eeprom_fetch(hdw);
29593 diff -urNp linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c
29594 --- linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
29595 +++ linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
29596 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
29597 unsigned char localPAT[256];
29598 unsigned char localPMT[256];
29599
29600 + pax_track_stack();
29601 +
29602 /* Set video format - must be done first as it resets other settings */
29603 set_reg8(client, 0x41, h->video_format);
29604
29605 diff -urNp linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c
29606 --- linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
29607 +++ linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
29608 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
29609 u8 tmp[512];
29610 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29611
29612 + pax_track_stack();
29613 +
29614 /* While any outstand message on the bus exists... */
29615 do {
29616
29617 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
29618 u8 tmp[512];
29619 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29620
29621 + pax_track_stack();
29622 +
29623 while (loop) {
29624
29625 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
29626 diff -urNp linux-3.0.7/drivers/media/video/timblogiw.c linux-3.0.7/drivers/media/video/timblogiw.c
29627 --- linux-3.0.7/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
29628 +++ linux-3.0.7/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
29629 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
29630
29631 /* Platform device functions */
29632
29633 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
29634 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
29635 .vidioc_querycap = timblogiw_querycap,
29636 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
29637 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
29638 diff -urNp linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c
29639 --- linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
29640 +++ linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
29641 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
29642 unsigned char rv, gv, bv;
29643 static unsigned char *Y, *U, *V;
29644
29645 + pax_track_stack();
29646 +
29647 frame = usbvision->cur_frame;
29648 image_size = frame->frmwidth * frame->frmheight;
29649 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
29650 diff -urNp linux-3.0.7/drivers/media/video/videobuf-dma-sg.c linux-3.0.7/drivers/media/video/videobuf-dma-sg.c
29651 --- linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
29652 +++ linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
29653 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
29654 {
29655 struct videobuf_queue q;
29656
29657 + pax_track_stack();
29658 +
29659 /* Required to make generic handler to call __videobuf_alloc */
29660 q.int_ops = &sg_ops;
29661
29662 diff -urNp linux-3.0.7/drivers/message/fusion/mptbase.c linux-3.0.7/drivers/message/fusion/mptbase.c
29663 --- linux-3.0.7/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
29664 +++ linux-3.0.7/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
29665 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
29666 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
29667 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
29668
29669 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29670 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
29671 +#else
29672 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
29673 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
29674 +#endif
29675 +
29676 /*
29677 * Rounding UP to nearest 4-kB boundary here...
29678 */
29679 diff -urNp linux-3.0.7/drivers/message/fusion/mptsas.c linux-3.0.7/drivers/message/fusion/mptsas.c
29680 --- linux-3.0.7/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
29681 +++ linux-3.0.7/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
29682 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
29683 return 0;
29684 }
29685
29686 +static inline void
29687 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29688 +{
29689 + if (phy_info->port_details) {
29690 + phy_info->port_details->rphy = rphy;
29691 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29692 + ioc->name, rphy));
29693 + }
29694 +
29695 + if (rphy) {
29696 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29697 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29698 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29699 + ioc->name, rphy, rphy->dev.release));
29700 + }
29701 +}
29702 +
29703 /* no mutex */
29704 static void
29705 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
29706 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
29707 return NULL;
29708 }
29709
29710 -static inline void
29711 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29712 -{
29713 - if (phy_info->port_details) {
29714 - phy_info->port_details->rphy = rphy;
29715 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29716 - ioc->name, rphy));
29717 - }
29718 -
29719 - if (rphy) {
29720 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29721 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29722 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29723 - ioc->name, rphy, rphy->dev.release));
29724 - }
29725 -}
29726 -
29727 static inline struct sas_port *
29728 mptsas_get_port(struct mptsas_phyinfo *phy_info)
29729 {
29730 diff -urNp linux-3.0.7/drivers/message/fusion/mptscsih.c linux-3.0.7/drivers/message/fusion/mptscsih.c
29731 --- linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
29732 +++ linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
29733 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
29734
29735 h = shost_priv(SChost);
29736
29737 - if (h) {
29738 - if (h->info_kbuf == NULL)
29739 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29740 - return h->info_kbuf;
29741 - h->info_kbuf[0] = '\0';
29742 + if (!h)
29743 + return NULL;
29744
29745 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29746 - h->info_kbuf[size-1] = '\0';
29747 - }
29748 + if (h->info_kbuf == NULL)
29749 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29750 + return h->info_kbuf;
29751 + h->info_kbuf[0] = '\0';
29752 +
29753 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29754 + h->info_kbuf[size-1] = '\0';
29755
29756 return h->info_kbuf;
29757 }
29758 diff -urNp linux-3.0.7/drivers/message/i2o/i2o_config.c linux-3.0.7/drivers/message/i2o/i2o_config.c
29759 --- linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
29760 +++ linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
29761 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
29762 struct i2o_message *msg;
29763 unsigned int iop;
29764
29765 + pax_track_stack();
29766 +
29767 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
29768 return -EFAULT;
29769
29770 diff -urNp linux-3.0.7/drivers/message/i2o/i2o_proc.c linux-3.0.7/drivers/message/i2o/i2o_proc.c
29771 --- linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
29772 +++ linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
29773 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
29774 "Array Controller Device"
29775 };
29776
29777 -static char *chtostr(u8 * chars, int n)
29778 -{
29779 - char tmp[256];
29780 - tmp[0] = 0;
29781 - return strncat(tmp, (char *)chars, n);
29782 -}
29783 -
29784 static int i2o_report_query_status(struct seq_file *seq, int block_status,
29785 char *group)
29786 {
29787 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
29788
29789 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
29790 seq_printf(seq, "%-#8x", ddm_table.module_id);
29791 - seq_printf(seq, "%-29s",
29792 - chtostr(ddm_table.module_name_version, 28));
29793 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
29794 seq_printf(seq, "%9d ", ddm_table.data_size);
29795 seq_printf(seq, "%8d", ddm_table.code_size);
29796
29797 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
29798
29799 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
29800 seq_printf(seq, "%-#8x", dst->module_id);
29801 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
29802 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
29803 + seq_printf(seq, "%-.28s", dst->module_name_version);
29804 + seq_printf(seq, "%-.8s", dst->date);
29805 seq_printf(seq, "%8d ", dst->module_size);
29806 seq_printf(seq, "%8d ", dst->mpb_size);
29807 seq_printf(seq, "0x%04x", dst->module_flags);
29808 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
29809 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
29810 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
29811 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
29812 - seq_printf(seq, "Vendor info : %s\n",
29813 - chtostr((u8 *) (work32 + 2), 16));
29814 - seq_printf(seq, "Product info : %s\n",
29815 - chtostr((u8 *) (work32 + 6), 16));
29816 - seq_printf(seq, "Description : %s\n",
29817 - chtostr((u8 *) (work32 + 10), 16));
29818 - seq_printf(seq, "Product rev. : %s\n",
29819 - chtostr((u8 *) (work32 + 14), 8));
29820 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
29821 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
29822 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
29823 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
29824
29825 seq_printf(seq, "Serial number : ");
29826 print_serial_number(seq, (u8 *) (work32 + 16),
29827 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
29828 }
29829
29830 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
29831 - seq_printf(seq, "Module name : %s\n",
29832 - chtostr(result.module_name, 24));
29833 - seq_printf(seq, "Module revision : %s\n",
29834 - chtostr(result.module_rev, 8));
29835 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
29836 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
29837
29838 seq_printf(seq, "Serial number : ");
29839 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
29840 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
29841 return 0;
29842 }
29843
29844 - seq_printf(seq, "Device name : %s\n",
29845 - chtostr(result.device_name, 64));
29846 - seq_printf(seq, "Service name : %s\n",
29847 - chtostr(result.service_name, 64));
29848 - seq_printf(seq, "Physical name : %s\n",
29849 - chtostr(result.physical_location, 64));
29850 - seq_printf(seq, "Instance number : %s\n",
29851 - chtostr(result.instance_number, 4));
29852 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
29853 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
29854 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
29855 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
29856
29857 return 0;
29858 }
29859 diff -urNp linux-3.0.7/drivers/message/i2o/iop.c linux-3.0.7/drivers/message/i2o/iop.c
29860 --- linux-3.0.7/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
29861 +++ linux-3.0.7/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
29862 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
29863
29864 spin_lock_irqsave(&c->context_list_lock, flags);
29865
29866 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
29867 - atomic_inc(&c->context_list_counter);
29868 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
29869 + atomic_inc_unchecked(&c->context_list_counter);
29870
29871 - entry->context = atomic_read(&c->context_list_counter);
29872 + entry->context = atomic_read_unchecked(&c->context_list_counter);
29873
29874 list_add(&entry->list, &c->context_list);
29875
29876 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
29877
29878 #if BITS_PER_LONG == 64
29879 spin_lock_init(&c->context_list_lock);
29880 - atomic_set(&c->context_list_counter, 0);
29881 + atomic_set_unchecked(&c->context_list_counter, 0);
29882 INIT_LIST_HEAD(&c->context_list);
29883 #endif
29884
29885 diff -urNp linux-3.0.7/drivers/mfd/ab3100-core.c linux-3.0.7/drivers/mfd/ab3100-core.c
29886 --- linux-3.0.7/drivers/mfd/ab3100-core.c 2011-07-21 22:17:23.000000000 -0400
29887 +++ linux-3.0.7/drivers/mfd/ab3100-core.c 2011-10-11 10:44:33.000000000 -0400
29888 @@ -809,7 +809,7 @@ struct ab_family_id {
29889 char *name;
29890 };
29891
29892 -static const struct ab_family_id ids[] __devinitdata = {
29893 +static const struct ab_family_id ids[] __devinitconst = {
29894 /* AB3100 */
29895 {
29896 .id = 0xc0,
29897 diff -urNp linux-3.0.7/drivers/mfd/abx500-core.c linux-3.0.7/drivers/mfd/abx500-core.c
29898 --- linux-3.0.7/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
29899 +++ linux-3.0.7/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
29900 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
29901
29902 struct abx500_device_entry {
29903 struct list_head list;
29904 - struct abx500_ops ops;
29905 + abx500_ops_no_const ops;
29906 struct device *dev;
29907 };
29908
29909 diff -urNp linux-3.0.7/drivers/mfd/janz-cmodio.c linux-3.0.7/drivers/mfd/janz-cmodio.c
29910 --- linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
29911 +++ linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
29912 @@ -13,6 +13,7 @@
29913
29914 #include <linux/kernel.h>
29915 #include <linux/module.h>
29916 +#include <linux/slab.h>
29917 #include <linux/init.h>
29918 #include <linux/pci.h>
29919 #include <linux/interrupt.h>
29920 diff -urNp linux-3.0.7/drivers/mfd/wm8350-i2c.c linux-3.0.7/drivers/mfd/wm8350-i2c.c
29921 --- linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
29922 +++ linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
29923 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
29924 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
29925 int ret;
29926
29927 + pax_track_stack();
29928 +
29929 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
29930 return -EINVAL;
29931
29932 diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c
29933 --- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:09.000000000 -0400
29934 +++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:19.000000000 -0400
29935 @@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
29936 * the lid is closed. This leads to interrupts as soon as a little move
29937 * is done.
29938 */
29939 - atomic_inc(&lis3_dev.count);
29940 + atomic_inc_unchecked(&lis3_dev.count);
29941
29942 wake_up_interruptible(&lis3_dev.misc_wait);
29943 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29944 @@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
29945 if (lis3_dev.pm_dev)
29946 pm_runtime_get_sync(lis3_dev.pm_dev);
29947
29948 - atomic_set(&lis3_dev.count, 0);
29949 + atomic_set_unchecked(&lis3_dev.count, 0);
29950 return 0;
29951 }
29952
29953 @@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
29954 add_wait_queue(&lis3_dev.misc_wait, &wait);
29955 while (true) {
29956 set_current_state(TASK_INTERRUPTIBLE);
29957 - data = atomic_xchg(&lis3_dev.count, 0);
29958 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29959 if (data)
29960 break;
29961
29962 @@ -585,7 +585,7 @@ out:
29963 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29964 {
29965 poll_wait(file, &lis3_dev.misc_wait, wait);
29966 - if (atomic_read(&lis3_dev.count))
29967 + if (atomic_read_unchecked(&lis3_dev.count))
29968 return POLLIN | POLLRDNORM;
29969 return 0;
29970 }
29971 diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h
29972 --- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
29973 +++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
29974 @@ -265,7 +265,7 @@ struct lis3lv02d {
29975 struct input_polled_dev *idev; /* input device */
29976 struct platform_device *pdev; /* platform device */
29977 struct regulator_bulk_data regulators[2];
29978 - atomic_t count; /* interrupt count after last read */
29979 + atomic_unchecked_t count; /* interrupt count after last read */
29980 union axis_conversion ac; /* hw -> logical axis */
29981 int mapped_btns[3];
29982
29983 diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c
29984 --- linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
29985 +++ linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
29986 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
29987 unsigned long nsec;
29988
29989 nsec = CLKS2NSEC(clks);
29990 - atomic_long_inc(&mcs_op_statistics[op].count);
29991 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
29992 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
29993 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
29994 if (mcs_op_statistics[op].max < nsec)
29995 mcs_op_statistics[op].max = nsec;
29996 }
29997 diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c
29998 --- linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
29999 +++ linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
30000 @@ -32,9 +32,9 @@
30001
30002 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30003
30004 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30005 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30006 {
30007 - unsigned long val = atomic_long_read(v);
30008 + unsigned long val = atomic_long_read_unchecked(v);
30009
30010 seq_printf(s, "%16lu %s\n", val, id);
30011 }
30012 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30013
30014 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30015 for (op = 0; op < mcsop_last; op++) {
30016 - count = atomic_long_read(&mcs_op_statistics[op].count);
30017 - total = atomic_long_read(&mcs_op_statistics[op].total);
30018 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30019 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30020 max = mcs_op_statistics[op].max;
30021 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30022 count ? total / count : 0, max);
30023 diff -urNp linux-3.0.7/drivers/misc/sgi-gru/grutables.h linux-3.0.7/drivers/misc/sgi-gru/grutables.h
30024 --- linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
30025 +++ linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
30026 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30027 * GRU statistics.
30028 */
30029 struct gru_stats_s {
30030 - atomic_long_t vdata_alloc;
30031 - atomic_long_t vdata_free;
30032 - atomic_long_t gts_alloc;
30033 - atomic_long_t gts_free;
30034 - atomic_long_t gms_alloc;
30035 - atomic_long_t gms_free;
30036 - atomic_long_t gts_double_allocate;
30037 - atomic_long_t assign_context;
30038 - atomic_long_t assign_context_failed;
30039 - atomic_long_t free_context;
30040 - atomic_long_t load_user_context;
30041 - atomic_long_t load_kernel_context;
30042 - atomic_long_t lock_kernel_context;
30043 - atomic_long_t unlock_kernel_context;
30044 - atomic_long_t steal_user_context;
30045 - atomic_long_t steal_kernel_context;
30046 - atomic_long_t steal_context_failed;
30047 - atomic_long_t nopfn;
30048 - atomic_long_t asid_new;
30049 - atomic_long_t asid_next;
30050 - atomic_long_t asid_wrap;
30051 - atomic_long_t asid_reuse;
30052 - atomic_long_t intr;
30053 - atomic_long_t intr_cbr;
30054 - atomic_long_t intr_tfh;
30055 - atomic_long_t intr_spurious;
30056 - atomic_long_t intr_mm_lock_failed;
30057 - atomic_long_t call_os;
30058 - atomic_long_t call_os_wait_queue;
30059 - atomic_long_t user_flush_tlb;
30060 - atomic_long_t user_unload_context;
30061 - atomic_long_t user_exception;
30062 - atomic_long_t set_context_option;
30063 - atomic_long_t check_context_retarget_intr;
30064 - atomic_long_t check_context_unload;
30065 - atomic_long_t tlb_dropin;
30066 - atomic_long_t tlb_preload_page;
30067 - atomic_long_t tlb_dropin_fail_no_asid;
30068 - atomic_long_t tlb_dropin_fail_upm;
30069 - atomic_long_t tlb_dropin_fail_invalid;
30070 - atomic_long_t tlb_dropin_fail_range_active;
30071 - atomic_long_t tlb_dropin_fail_idle;
30072 - atomic_long_t tlb_dropin_fail_fmm;
30073 - atomic_long_t tlb_dropin_fail_no_exception;
30074 - atomic_long_t tfh_stale_on_fault;
30075 - atomic_long_t mmu_invalidate_range;
30076 - atomic_long_t mmu_invalidate_page;
30077 - atomic_long_t flush_tlb;
30078 - atomic_long_t flush_tlb_gru;
30079 - atomic_long_t flush_tlb_gru_tgh;
30080 - atomic_long_t flush_tlb_gru_zero_asid;
30081 -
30082 - atomic_long_t copy_gpa;
30083 - atomic_long_t read_gpa;
30084 -
30085 - atomic_long_t mesq_receive;
30086 - atomic_long_t mesq_receive_none;
30087 - atomic_long_t mesq_send;
30088 - atomic_long_t mesq_send_failed;
30089 - atomic_long_t mesq_noop;
30090 - atomic_long_t mesq_send_unexpected_error;
30091 - atomic_long_t mesq_send_lb_overflow;
30092 - atomic_long_t mesq_send_qlimit_reached;
30093 - atomic_long_t mesq_send_amo_nacked;
30094 - atomic_long_t mesq_send_put_nacked;
30095 - atomic_long_t mesq_page_overflow;
30096 - atomic_long_t mesq_qf_locked;
30097 - atomic_long_t mesq_qf_noop_not_full;
30098 - atomic_long_t mesq_qf_switch_head_failed;
30099 - atomic_long_t mesq_qf_unexpected_error;
30100 - atomic_long_t mesq_noop_unexpected_error;
30101 - atomic_long_t mesq_noop_lb_overflow;
30102 - atomic_long_t mesq_noop_qlimit_reached;
30103 - atomic_long_t mesq_noop_amo_nacked;
30104 - atomic_long_t mesq_noop_put_nacked;
30105 - atomic_long_t mesq_noop_page_overflow;
30106 + atomic_long_unchecked_t vdata_alloc;
30107 + atomic_long_unchecked_t vdata_free;
30108 + atomic_long_unchecked_t gts_alloc;
30109 + atomic_long_unchecked_t gts_free;
30110 + atomic_long_unchecked_t gms_alloc;
30111 + atomic_long_unchecked_t gms_free;
30112 + atomic_long_unchecked_t gts_double_allocate;
30113 + atomic_long_unchecked_t assign_context;
30114 + atomic_long_unchecked_t assign_context_failed;
30115 + atomic_long_unchecked_t free_context;
30116 + atomic_long_unchecked_t load_user_context;
30117 + atomic_long_unchecked_t load_kernel_context;
30118 + atomic_long_unchecked_t lock_kernel_context;
30119 + atomic_long_unchecked_t unlock_kernel_context;
30120 + atomic_long_unchecked_t steal_user_context;
30121 + atomic_long_unchecked_t steal_kernel_context;
30122 + atomic_long_unchecked_t steal_context_failed;
30123 + atomic_long_unchecked_t nopfn;
30124 + atomic_long_unchecked_t asid_new;
30125 + atomic_long_unchecked_t asid_next;
30126 + atomic_long_unchecked_t asid_wrap;
30127 + atomic_long_unchecked_t asid_reuse;
30128 + atomic_long_unchecked_t intr;
30129 + atomic_long_unchecked_t intr_cbr;
30130 + atomic_long_unchecked_t intr_tfh;
30131 + atomic_long_unchecked_t intr_spurious;
30132 + atomic_long_unchecked_t intr_mm_lock_failed;
30133 + atomic_long_unchecked_t call_os;
30134 + atomic_long_unchecked_t call_os_wait_queue;
30135 + atomic_long_unchecked_t user_flush_tlb;
30136 + atomic_long_unchecked_t user_unload_context;
30137 + atomic_long_unchecked_t user_exception;
30138 + atomic_long_unchecked_t set_context_option;
30139 + atomic_long_unchecked_t check_context_retarget_intr;
30140 + atomic_long_unchecked_t check_context_unload;
30141 + atomic_long_unchecked_t tlb_dropin;
30142 + atomic_long_unchecked_t tlb_preload_page;
30143 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30144 + atomic_long_unchecked_t tlb_dropin_fail_upm;
30145 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
30146 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
30147 + atomic_long_unchecked_t tlb_dropin_fail_idle;
30148 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
30149 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30150 + atomic_long_unchecked_t tfh_stale_on_fault;
30151 + atomic_long_unchecked_t mmu_invalidate_range;
30152 + atomic_long_unchecked_t mmu_invalidate_page;
30153 + atomic_long_unchecked_t flush_tlb;
30154 + atomic_long_unchecked_t flush_tlb_gru;
30155 + atomic_long_unchecked_t flush_tlb_gru_tgh;
30156 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30157 +
30158 + atomic_long_unchecked_t copy_gpa;
30159 + atomic_long_unchecked_t read_gpa;
30160 +
30161 + atomic_long_unchecked_t mesq_receive;
30162 + atomic_long_unchecked_t mesq_receive_none;
30163 + atomic_long_unchecked_t mesq_send;
30164 + atomic_long_unchecked_t mesq_send_failed;
30165 + atomic_long_unchecked_t mesq_noop;
30166 + atomic_long_unchecked_t mesq_send_unexpected_error;
30167 + atomic_long_unchecked_t mesq_send_lb_overflow;
30168 + atomic_long_unchecked_t mesq_send_qlimit_reached;
30169 + atomic_long_unchecked_t mesq_send_amo_nacked;
30170 + atomic_long_unchecked_t mesq_send_put_nacked;
30171 + atomic_long_unchecked_t mesq_page_overflow;
30172 + atomic_long_unchecked_t mesq_qf_locked;
30173 + atomic_long_unchecked_t mesq_qf_noop_not_full;
30174 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
30175 + atomic_long_unchecked_t mesq_qf_unexpected_error;
30176 + atomic_long_unchecked_t mesq_noop_unexpected_error;
30177 + atomic_long_unchecked_t mesq_noop_lb_overflow;
30178 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
30179 + atomic_long_unchecked_t mesq_noop_amo_nacked;
30180 + atomic_long_unchecked_t mesq_noop_put_nacked;
30181 + atomic_long_unchecked_t mesq_noop_page_overflow;
30182
30183 };
30184
30185 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30186 tghop_invalidate, mcsop_last};
30187
30188 struct mcs_op_statistic {
30189 - atomic_long_t count;
30190 - atomic_long_t total;
30191 + atomic_long_unchecked_t count;
30192 + atomic_long_unchecked_t total;
30193 unsigned long max;
30194 };
30195
30196 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30197
30198 #define STAT(id) do { \
30199 if (gru_options & OPT_STATS) \
30200 - atomic_long_inc(&gru_stats.id); \
30201 + atomic_long_inc_unchecked(&gru_stats.id); \
30202 } while (0)
30203
30204 #ifdef CONFIG_SGI_GRU_DEBUG
30205 diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc.h linux-3.0.7/drivers/misc/sgi-xp/xpc.h
30206 --- linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-07-21 22:17:23.000000000 -0400
30207 +++ linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-10-11 10:44:33.000000000 -0400
30208 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
30209 void (*received_payload) (struct xpc_channel *, void *);
30210 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30211 };
30212 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30213
30214 /* struct xpc_partition act_state values (for XPC HB) */
30215
30216 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30217 /* found in xpc_main.c */
30218 extern struct device *xpc_part;
30219 extern struct device *xpc_chan;
30220 -extern struct xpc_arch_operations xpc_arch_ops;
30221 +extern xpc_arch_operations_no_const xpc_arch_ops;
30222 extern int xpc_disengage_timelimit;
30223 extern int xpc_disengage_timedout;
30224 extern int xpc_activate_IRQ_rcvd;
30225 diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c
30226 --- linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-07-21 22:17:23.000000000 -0400
30227 +++ linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-10-11 10:44:33.000000000 -0400
30228 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30229 .notifier_call = xpc_system_die,
30230 };
30231
30232 -struct xpc_arch_operations xpc_arch_ops;
30233 +xpc_arch_operations_no_const xpc_arch_ops;
30234
30235 /*
30236 * Timer function to enforce the timelimit on the partition disengage.
30237 diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xp.h linux-3.0.7/drivers/misc/sgi-xp/xp.h
30238 --- linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
30239 +++ linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
30240 @@ -289,7 +289,7 @@ struct xpc_interface {
30241 xpc_notify_func, void *);
30242 void (*received) (short, int, void *);
30243 enum xp_retval (*partid_to_nasids) (short, void *);
30244 -};
30245 +} __no_const;
30246
30247 extern struct xpc_interface xpc_interface;
30248
30249 diff -urNp linux-3.0.7/drivers/mmc/host/sdhci-pci.c linux-3.0.7/drivers/mmc/host/sdhci-pci.c
30250 --- linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-07-21 22:17:23.000000000 -0400
30251 +++ linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-10-11 10:44:33.000000000 -0400
30252 @@ -524,7 +524,7 @@ static const struct sdhci_pci_fixes sdhc
30253 .probe = via_probe,
30254 };
30255
30256 -static const struct pci_device_id pci_ids[] __devinitdata = {
30257 +static const struct pci_device_id pci_ids[] __devinitconst = {
30258 {
30259 .vendor = PCI_VENDOR_ID_RICOH,
30260 .device = PCI_DEVICE_ID_RICOH_R5C822,
30261 diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c
30262 --- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
30263 +++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
30264 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30265 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30266 unsigned long timeo = jiffies + HZ;
30267
30268 + pax_track_stack();
30269 +
30270 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30271 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30272 goto sleep;
30273 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30274 unsigned long initial_adr;
30275 int initial_len = len;
30276
30277 + pax_track_stack();
30278 +
30279 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30280 adr += chip->start;
30281 initial_adr = adr;
30282 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30283 int retries = 3;
30284 int ret;
30285
30286 + pax_track_stack();
30287 +
30288 adr += chip->start;
30289
30290 retry:
30291 diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c
30292 --- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
30293 +++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
30294 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30295 unsigned long cmd_addr;
30296 struct cfi_private *cfi = map->fldrv_priv;
30297
30298 + pax_track_stack();
30299 +
30300 adr += chip->start;
30301
30302 /* Ensure cmd read/writes are aligned. */
30303 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30304 DECLARE_WAITQUEUE(wait, current);
30305 int wbufsize, z;
30306
30307 + pax_track_stack();
30308 +
30309 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30310 if (adr & (map_bankwidth(map)-1))
30311 return -EINVAL;
30312 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30313 DECLARE_WAITQUEUE(wait, current);
30314 int ret = 0;
30315
30316 + pax_track_stack();
30317 +
30318 adr += chip->start;
30319
30320 /* Let's determine this according to the interleave only once */
30321 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30322 unsigned long timeo = jiffies + HZ;
30323 DECLARE_WAITQUEUE(wait, current);
30324
30325 + pax_track_stack();
30326 +
30327 adr += chip->start;
30328
30329 /* Let's determine this according to the interleave only once */
30330 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30331 unsigned long timeo = jiffies + HZ;
30332 DECLARE_WAITQUEUE(wait, current);
30333
30334 + pax_track_stack();
30335 +
30336 adr += chip->start;
30337
30338 /* Let's determine this according to the interleave only once */
30339 diff -urNp linux-3.0.7/drivers/mtd/devices/doc2000.c linux-3.0.7/drivers/mtd/devices/doc2000.c
30340 --- linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
30341 +++ linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
30342 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30343
30344 /* The ECC will not be calculated correctly if less than 512 is written */
30345 /* DBB-
30346 - if (len != 0x200 && eccbuf)
30347 + if (len != 0x200)
30348 printk(KERN_WARNING
30349 "ECC needs a full sector write (adr: %lx size %lx)\n",
30350 (long) to, (long) len);
30351 diff -urNp linux-3.0.7/drivers/mtd/devices/doc2001.c linux-3.0.7/drivers/mtd/devices/doc2001.c
30352 --- linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
30353 +++ linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
30354 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30355 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30356
30357 /* Don't allow read past end of device */
30358 - if (from >= this->totlen)
30359 + if (from >= this->totlen || !len)
30360 return -EINVAL;
30361
30362 /* Don't allow a single read to cross a 512-byte block boundary */
30363 diff -urNp linux-3.0.7/drivers/mtd/ftl.c linux-3.0.7/drivers/mtd/ftl.c
30364 --- linux-3.0.7/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
30365 +++ linux-3.0.7/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
30366 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30367 loff_t offset;
30368 uint16_t srcunitswap = cpu_to_le16(srcunit);
30369
30370 + pax_track_stack();
30371 +
30372 eun = &part->EUNInfo[srcunit];
30373 xfer = &part->XferInfo[xferunit];
30374 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30375 diff -urNp linux-3.0.7/drivers/mtd/inftlcore.c linux-3.0.7/drivers/mtd/inftlcore.c
30376 --- linux-3.0.7/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
30377 +++ linux-3.0.7/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
30378 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30379 struct inftl_oob oob;
30380 size_t retlen;
30381
30382 + pax_track_stack();
30383 +
30384 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30385 "pending=%d)\n", inftl, thisVUC, pendingblock);
30386
30387 diff -urNp linux-3.0.7/drivers/mtd/inftlmount.c linux-3.0.7/drivers/mtd/inftlmount.c
30388 --- linux-3.0.7/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
30389 +++ linux-3.0.7/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
30390 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30391 struct INFTLPartition *ip;
30392 size_t retlen;
30393
30394 + pax_track_stack();
30395 +
30396 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30397
30398 /*
30399 diff -urNp linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c
30400 --- linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
30401 +++ linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
30402 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30403 {
30404 map_word pfow_val[4];
30405
30406 + pax_track_stack();
30407 +
30408 /* Check identification string */
30409 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30410 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30411 diff -urNp linux-3.0.7/drivers/mtd/mtdchar.c linux-3.0.7/drivers/mtd/mtdchar.c
30412 --- linux-3.0.7/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
30413 +++ linux-3.0.7/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
30414 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
30415 u_long size;
30416 struct mtd_info_user info;
30417
30418 + pax_track_stack();
30419 +
30420 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30421
30422 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30423 diff -urNp linux-3.0.7/drivers/mtd/nand/denali.c linux-3.0.7/drivers/mtd/nand/denali.c
30424 --- linux-3.0.7/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
30425 +++ linux-3.0.7/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
30426 @@ -26,6 +26,7 @@
30427 #include <linux/pci.h>
30428 #include <linux/mtd/mtd.h>
30429 #include <linux/module.h>
30430 +#include <linux/slab.h>
30431
30432 #include "denali.h"
30433
30434 diff -urNp linux-3.0.7/drivers/mtd/nftlcore.c linux-3.0.7/drivers/mtd/nftlcore.c
30435 --- linux-3.0.7/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
30436 +++ linux-3.0.7/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
30437 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30438 int inplace = 1;
30439 size_t retlen;
30440
30441 + pax_track_stack();
30442 +
30443 memset(BlockMap, 0xff, sizeof(BlockMap));
30444 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30445
30446 diff -urNp linux-3.0.7/drivers/mtd/nftlmount.c linux-3.0.7/drivers/mtd/nftlmount.c
30447 --- linux-3.0.7/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
30448 +++ linux-3.0.7/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
30449 @@ -24,6 +24,7 @@
30450 #include <asm/errno.h>
30451 #include <linux/delay.h>
30452 #include <linux/slab.h>
30453 +#include <linux/sched.h>
30454 #include <linux/mtd/mtd.h>
30455 #include <linux/mtd/nand.h>
30456 #include <linux/mtd/nftl.h>
30457 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
30458 struct mtd_info *mtd = nftl->mbd.mtd;
30459 unsigned int i;
30460
30461 + pax_track_stack();
30462 +
30463 /* Assume logical EraseSize == physical erasesize for starting the scan.
30464 We'll sort it out later if we find a MediaHeader which says otherwise */
30465 /* Actually, we won't. The new DiskOnChip driver has already scanned
30466 diff -urNp linux-3.0.7/drivers/mtd/ubi/build.c linux-3.0.7/drivers/mtd/ubi/build.c
30467 --- linux-3.0.7/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
30468 +++ linux-3.0.7/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
30469 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
30470 static int __init bytes_str_to_int(const char *str)
30471 {
30472 char *endp;
30473 - unsigned long result;
30474 + unsigned long result, scale = 1;
30475
30476 result = simple_strtoul(str, &endp, 0);
30477 if (str == endp || result >= INT_MAX) {
30478 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
30479
30480 switch (*endp) {
30481 case 'G':
30482 - result *= 1024;
30483 + scale *= 1024;
30484 case 'M':
30485 - result *= 1024;
30486 + scale *= 1024;
30487 case 'K':
30488 - result *= 1024;
30489 + scale *= 1024;
30490 if (endp[1] == 'i' && endp[2] == 'B')
30491 endp += 2;
30492 case '\0':
30493 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
30494 return -EINVAL;
30495 }
30496
30497 - return result;
30498 + if ((intoverflow_t)result*scale >= INT_MAX) {
30499 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
30500 + str);
30501 + return -EINVAL;
30502 + }
30503 +
30504 + return result*scale;
30505 }
30506
30507 /**
30508 diff -urNp linux-3.0.7/drivers/net/atlx/atl2.c linux-3.0.7/drivers/net/atlx/atl2.c
30509 --- linux-3.0.7/drivers/net/atlx/atl2.c 2011-07-21 22:17:23.000000000 -0400
30510 +++ linux-3.0.7/drivers/net/atlx/atl2.c 2011-10-11 10:44:33.000000000 -0400
30511 @@ -2840,7 +2840,7 @@ static void atl2_force_ps(struct atl2_hw
30512 */
30513
30514 #define ATL2_PARAM(X, desc) \
30515 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30516 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30517 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
30518 MODULE_PARM_DESC(X, desc);
30519 #else
30520 diff -urNp linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c
30521 --- linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
30522 +++ linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
30523 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
30524 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
30525 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
30526
30527 -static struct bfa_ioc_hwif nw_hwif_ct;
30528 +static struct bfa_ioc_hwif nw_hwif_ct = {
30529 + .ioc_pll_init = bfa_ioc_ct_pll_init,
30530 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
30531 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
30532 + .ioc_reg_init = bfa_ioc_ct_reg_init,
30533 + .ioc_map_port = bfa_ioc_ct_map_port,
30534 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
30535 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
30536 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
30537 + .ioc_sync_start = bfa_ioc_ct_sync_start,
30538 + .ioc_sync_join = bfa_ioc_ct_sync_join,
30539 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
30540 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
30541 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
30542 +};
30543
30544 /**
30545 * Called from bfa_ioc_attach() to map asic specific calls.
30546 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
30547 void
30548 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
30549 {
30550 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
30551 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
30552 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
30553 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
30554 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
30555 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
30556 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
30557 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
30558 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
30559 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
30560 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
30561 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
30562 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
30563 -
30564 ioc->ioc_hwif = &nw_hwif_ct;
30565 }
30566
30567 diff -urNp linux-3.0.7/drivers/net/bna/bnad.c linux-3.0.7/drivers/net/bna/bnad.c
30568 --- linux-3.0.7/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
30569 +++ linux-3.0.7/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
30570 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30571 struct bna_intr_info *intr_info =
30572 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
30573 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
30574 - struct bna_tx_event_cbfn tx_cbfn;
30575 + static struct bna_tx_event_cbfn tx_cbfn = {
30576 + /* Initialize the tx event handlers */
30577 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
30578 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
30579 + .tx_stall_cbfn = bnad_cb_tx_stall,
30580 + .tx_resume_cbfn = bnad_cb_tx_resume,
30581 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
30582 + };
30583 struct bna_tx *tx;
30584 unsigned long flags;
30585
30586 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30587 tx_config->txq_depth = bnad->txq_depth;
30588 tx_config->tx_type = BNA_TX_T_REGULAR;
30589
30590 - /* Initialize the tx event handlers */
30591 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
30592 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
30593 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
30594 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
30595 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
30596 -
30597 /* Get BNA's resource requirement for one tx object */
30598 spin_lock_irqsave(&bnad->bna_lock, flags);
30599 bna_tx_res_req(bnad->num_txq_per_tx,
30600 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
30601 struct bna_intr_info *intr_info =
30602 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
30603 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
30604 - struct bna_rx_event_cbfn rx_cbfn;
30605 + static struct bna_rx_event_cbfn rx_cbfn = {
30606 + /* Initialize the Rx event handlers */
30607 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
30608 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
30609 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
30610 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
30611 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
30612 + .rx_post_cbfn = bnad_cb_rx_post
30613 + };
30614 struct bna_rx *rx;
30615 unsigned long flags;
30616
30617 /* Initialize the Rx object configuration */
30618 bnad_init_rx_config(bnad, rx_config);
30619
30620 - /* Initialize the Rx event handlers */
30621 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
30622 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
30623 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
30624 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
30625 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
30626 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
30627 -
30628 /* Get BNA's resource requirement for one Rx object */
30629 spin_lock_irqsave(&bnad->bna_lock, flags);
30630 bna_rx_res_req(rx_config, res_info);
30631 diff -urNp linux-3.0.7/drivers/net/bnx2.c linux-3.0.7/drivers/net/bnx2.c
30632 --- linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:54:54.000000000 -0400
30633 +++ linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:55:27.000000000 -0400
30634 @@ -5831,6 +5831,8 @@ bnx2_test_nvram(struct bnx2 *bp)
30635 int rc = 0;
30636 u32 magic, csum;
30637
30638 + pax_track_stack();
30639 +
30640 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
30641 goto test_nvram_done;
30642
30643 diff -urNp linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c
30644 --- linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30645 +++ linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
30646 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
30647 int i, rc;
30648 u32 magic, crc;
30649
30650 + pax_track_stack();
30651 +
30652 if (BP_NOMCP(bp))
30653 return 0;
30654
30655 diff -urNp linux-3.0.7/drivers/net/can/mscan/mscan.c linux-3.0.7/drivers/net/can/mscan/mscan.c
30656 --- linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-07-21 22:17:23.000000000 -0400
30657 +++ linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-10-17 02:51:46.000000000 -0400
30658 @@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(stru
30659 void __iomem *data = &regs->tx.dsr1_0;
30660 u16 *payload = (u16 *)frame->data;
30661
30662 - /* It is safe to write into dsr[dlc+1] */
30663 - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30664 + for (i = 0; i < frame->can_dlc / 2; i++) {
30665 out_be16(data, *payload++);
30666 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30667 }
30668 + /* write remaining byte if necessary */
30669 + if (frame->can_dlc & 1)
30670 + out_8(data, frame->data[frame->can_dlc - 1]);
30671 }
30672
30673 out_8(&regs->tx.dlr, frame->can_dlc);
30674 @@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct ne
30675 void __iomem *data = &regs->rx.dsr1_0;
30676 u16 *payload = (u16 *)frame->data;
30677
30678 - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30679 + for (i = 0; i < frame->can_dlc / 2; i++) {
30680 *payload++ = in_be16(data);
30681 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30682 }
30683 + /* read remaining byte if necessary */
30684 + if (frame->can_dlc & 1)
30685 + frame->data[frame->can_dlc - 1] = in_8(data);
30686 }
30687
30688 out_8(&regs->canrflg, MSCAN_RXF);
30689 diff -urNp linux-3.0.7/drivers/net/cxgb3/l2t.h linux-3.0.7/drivers/net/cxgb3/l2t.h
30690 --- linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:54:54.000000000 -0400
30691 +++ linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:55:27.000000000 -0400
30692 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
30693 */
30694 struct l2t_skb_cb {
30695 arp_failure_handler_func arp_failure_handler;
30696 -};
30697 +} __no_const;
30698
30699 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
30700
30701 diff -urNp linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c
30702 --- linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
30703 +++ linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
30704 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
30705 unsigned int nchan = adap->params.nports;
30706 struct msix_entry entries[MAX_INGQ + 1];
30707
30708 + pax_track_stack();
30709 +
30710 for (i = 0; i < ARRAY_SIZE(entries); ++i)
30711 entries[i].entry = i;
30712
30713 diff -urNp linux-3.0.7/drivers/net/cxgb4/t4_hw.c linux-3.0.7/drivers/net/cxgb4/t4_hw.c
30714 --- linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
30715 +++ linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
30716 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
30717 u8 vpd[VPD_LEN], csum;
30718 unsigned int vpdr_len, kw_offset, id_len;
30719
30720 + pax_track_stack();
30721 +
30722 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
30723 if (ret < 0)
30724 return ret;
30725 diff -urNp linux-3.0.7/drivers/net/e1000e/82571.c linux-3.0.7/drivers/net/e1000e/82571.c
30726 --- linux-3.0.7/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
30727 +++ linux-3.0.7/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
30728 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
30729 {
30730 struct e1000_hw *hw = &adapter->hw;
30731 struct e1000_mac_info *mac = &hw->mac;
30732 - struct e1000_mac_operations *func = &mac->ops;
30733 + e1000_mac_operations_no_const *func = &mac->ops;
30734 u32 swsm = 0;
30735 u32 swsm2 = 0;
30736 bool force_clear_smbi = false;
30737 diff -urNp linux-3.0.7/drivers/net/e1000e/es2lan.c linux-3.0.7/drivers/net/e1000e/es2lan.c
30738 --- linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
30739 +++ linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
30740 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
30741 {
30742 struct e1000_hw *hw = &adapter->hw;
30743 struct e1000_mac_info *mac = &hw->mac;
30744 - struct e1000_mac_operations *func = &mac->ops;
30745 + e1000_mac_operations_no_const *func = &mac->ops;
30746
30747 /* Set media type */
30748 switch (adapter->pdev->device) {
30749 diff -urNp linux-3.0.7/drivers/net/e1000e/hw.h linux-3.0.7/drivers/net/e1000e/hw.h
30750 --- linux-3.0.7/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
30751 +++ linux-3.0.7/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
30752 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
30753 void (*write_vfta)(struct e1000_hw *, u32, u32);
30754 s32 (*read_mac_addr)(struct e1000_hw *);
30755 };
30756 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30757
30758 /* Function pointers for the PHY. */
30759 struct e1000_phy_operations {
30760 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
30761 void (*power_up)(struct e1000_hw *);
30762 void (*power_down)(struct e1000_hw *);
30763 };
30764 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30765
30766 /* Function pointers for the NVM. */
30767 struct e1000_nvm_operations {
30768 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
30769 s32 (*validate)(struct e1000_hw *);
30770 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
30771 };
30772 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30773
30774 struct e1000_mac_info {
30775 - struct e1000_mac_operations ops;
30776 + e1000_mac_operations_no_const ops;
30777 u8 addr[ETH_ALEN];
30778 u8 perm_addr[ETH_ALEN];
30779
30780 @@ -853,7 +856,7 @@ struct e1000_mac_info {
30781 };
30782
30783 struct e1000_phy_info {
30784 - struct e1000_phy_operations ops;
30785 + e1000_phy_operations_no_const ops;
30786
30787 enum e1000_phy_type type;
30788
30789 @@ -887,7 +890,7 @@ struct e1000_phy_info {
30790 };
30791
30792 struct e1000_nvm_info {
30793 - struct e1000_nvm_operations ops;
30794 + e1000_nvm_operations_no_const ops;
30795
30796 enum e1000_nvm_type type;
30797 enum e1000_nvm_override override;
30798 diff -urNp linux-3.0.7/drivers/net/fealnx.c linux-3.0.7/drivers/net/fealnx.c
30799 --- linux-3.0.7/drivers/net/fealnx.c 2011-07-21 22:17:23.000000000 -0400
30800 +++ linux-3.0.7/drivers/net/fealnx.c 2011-10-11 10:44:33.000000000 -0400
30801 @@ -150,7 +150,7 @@ struct chip_info {
30802 int flags;
30803 };
30804
30805 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
30806 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
30807 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30808 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
30809 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30810 diff -urNp linux-3.0.7/drivers/net/hamradio/6pack.c linux-3.0.7/drivers/net/hamradio/6pack.c
30811 --- linux-3.0.7/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
30812 +++ linux-3.0.7/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
30813 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
30814 unsigned char buf[512];
30815 int count1;
30816
30817 + pax_track_stack();
30818 +
30819 if (!count)
30820 return;
30821
30822 diff -urNp linux-3.0.7/drivers/net/igb/e1000_hw.h linux-3.0.7/drivers/net/igb/e1000_hw.h
30823 --- linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
30824 +++ linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
30825 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
30826 s32 (*read_mac_addr)(struct e1000_hw *);
30827 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
30828 };
30829 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30830
30831 struct e1000_phy_operations {
30832 s32 (*acquire)(struct e1000_hw *);
30833 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
30834 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
30835 s32 (*write_reg)(struct e1000_hw *, u32, u16);
30836 };
30837 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30838
30839 struct e1000_nvm_operations {
30840 s32 (*acquire)(struct e1000_hw *);
30841 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
30842 s32 (*update)(struct e1000_hw *);
30843 s32 (*validate)(struct e1000_hw *);
30844 };
30845 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30846
30847 struct e1000_info {
30848 s32 (*get_invariants)(struct e1000_hw *);
30849 @@ -350,7 +353,7 @@ struct e1000_info {
30850 extern const struct e1000_info e1000_82575_info;
30851
30852 struct e1000_mac_info {
30853 - struct e1000_mac_operations ops;
30854 + e1000_mac_operations_no_const ops;
30855
30856 u8 addr[6];
30857 u8 perm_addr[6];
30858 @@ -388,7 +391,7 @@ struct e1000_mac_info {
30859 };
30860
30861 struct e1000_phy_info {
30862 - struct e1000_phy_operations ops;
30863 + e1000_phy_operations_no_const ops;
30864
30865 enum e1000_phy_type type;
30866
30867 @@ -423,7 +426,7 @@ struct e1000_phy_info {
30868 };
30869
30870 struct e1000_nvm_info {
30871 - struct e1000_nvm_operations ops;
30872 + e1000_nvm_operations_no_const ops;
30873 enum e1000_nvm_type type;
30874 enum e1000_nvm_override override;
30875
30876 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
30877 s32 (*check_for_ack)(struct e1000_hw *, u16);
30878 s32 (*check_for_rst)(struct e1000_hw *, u16);
30879 };
30880 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30881
30882 struct e1000_mbx_stats {
30883 u32 msgs_tx;
30884 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
30885 };
30886
30887 struct e1000_mbx_info {
30888 - struct e1000_mbx_operations ops;
30889 + e1000_mbx_operations_no_const ops;
30890 struct e1000_mbx_stats stats;
30891 u32 timeout;
30892 u32 usec_delay;
30893 diff -urNp linux-3.0.7/drivers/net/igbvf/vf.h linux-3.0.7/drivers/net/igbvf/vf.h
30894 --- linux-3.0.7/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
30895 +++ linux-3.0.7/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
30896 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
30897 s32 (*read_mac_addr)(struct e1000_hw *);
30898 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
30899 };
30900 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30901
30902 struct e1000_mac_info {
30903 - struct e1000_mac_operations ops;
30904 + e1000_mac_operations_no_const ops;
30905 u8 addr[6];
30906 u8 perm_addr[6];
30907
30908 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
30909 s32 (*check_for_ack)(struct e1000_hw *);
30910 s32 (*check_for_rst)(struct e1000_hw *);
30911 };
30912 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30913
30914 struct e1000_mbx_stats {
30915 u32 msgs_tx;
30916 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
30917 };
30918
30919 struct e1000_mbx_info {
30920 - struct e1000_mbx_operations ops;
30921 + e1000_mbx_operations_no_const ops;
30922 struct e1000_mbx_stats stats;
30923 u32 timeout;
30924 u32 usec_delay;
30925 diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_main.c linux-3.0.7/drivers/net/ixgb/ixgb_main.c
30926 --- linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
30927 +++ linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
30928 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
30929 u32 rctl;
30930 int i;
30931
30932 + pax_track_stack();
30933 +
30934 /* Check for Promiscuous and All Multicast modes */
30935
30936 rctl = IXGB_READ_REG(hw, RCTL);
30937 diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_param.c linux-3.0.7/drivers/net/ixgb/ixgb_param.c
30938 --- linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
30939 +++ linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
30940 @@ -261,6 +261,9 @@ void __devinit
30941 ixgb_check_options(struct ixgb_adapter *adapter)
30942 {
30943 int bd = adapter->bd_number;
30944 +
30945 + pax_track_stack();
30946 +
30947 if (bd >= IXGB_MAX_NIC) {
30948 pr_notice("Warning: no configuration for board #%i\n", bd);
30949 pr_notice("Using defaults for all values\n");
30950 diff -urNp linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h
30951 --- linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
30952 +++ linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
30953 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
30954 s32 (*update_checksum)(struct ixgbe_hw *);
30955 u16 (*calc_checksum)(struct ixgbe_hw *);
30956 };
30957 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
30958
30959 struct ixgbe_mac_operations {
30960 s32 (*init_hw)(struct ixgbe_hw *);
30961 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
30962 /* Flow Control */
30963 s32 (*fc_enable)(struct ixgbe_hw *, s32);
30964 };
30965 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
30966
30967 struct ixgbe_phy_operations {
30968 s32 (*identify)(struct ixgbe_hw *);
30969 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
30970 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
30971 s32 (*check_overtemp)(struct ixgbe_hw *);
30972 };
30973 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
30974
30975 struct ixgbe_eeprom_info {
30976 - struct ixgbe_eeprom_operations ops;
30977 + ixgbe_eeprom_operations_no_const ops;
30978 enum ixgbe_eeprom_type type;
30979 u32 semaphore_delay;
30980 u16 word_size;
30981 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
30982
30983 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
30984 struct ixgbe_mac_info {
30985 - struct ixgbe_mac_operations ops;
30986 + ixgbe_mac_operations_no_const ops;
30987 enum ixgbe_mac_type type;
30988 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30989 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30990 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
30991 };
30992
30993 struct ixgbe_phy_info {
30994 - struct ixgbe_phy_operations ops;
30995 + ixgbe_phy_operations_no_const ops;
30996 struct mdio_if_info mdio;
30997 enum ixgbe_phy_type type;
30998 u32 id;
30999 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
31000 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31001 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31002 };
31003 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31004
31005 struct ixgbe_mbx_stats {
31006 u32 msgs_tx;
31007 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
31008 };
31009
31010 struct ixgbe_mbx_info {
31011 - struct ixgbe_mbx_operations ops;
31012 + ixgbe_mbx_operations_no_const ops;
31013 struct ixgbe_mbx_stats stats;
31014 u32 timeout;
31015 u32 usec_delay;
31016 diff -urNp linux-3.0.7/drivers/net/ixgbevf/vf.h linux-3.0.7/drivers/net/ixgbevf/vf.h
31017 --- linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
31018 +++ linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
31019 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31020 s32 (*clear_vfta)(struct ixgbe_hw *);
31021 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31022 };
31023 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31024
31025 enum ixgbe_mac_type {
31026 ixgbe_mac_unknown = 0,
31027 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31028 };
31029
31030 struct ixgbe_mac_info {
31031 - struct ixgbe_mac_operations ops;
31032 + ixgbe_mac_operations_no_const ops;
31033 u8 addr[6];
31034 u8 perm_addr[6];
31035
31036 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31037 s32 (*check_for_ack)(struct ixgbe_hw *);
31038 s32 (*check_for_rst)(struct ixgbe_hw *);
31039 };
31040 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31041
31042 struct ixgbe_mbx_stats {
31043 u32 msgs_tx;
31044 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31045 };
31046
31047 struct ixgbe_mbx_info {
31048 - struct ixgbe_mbx_operations ops;
31049 + ixgbe_mbx_operations_no_const ops;
31050 struct ixgbe_mbx_stats stats;
31051 u32 timeout;
31052 u32 udelay;
31053 diff -urNp linux-3.0.7/drivers/net/ksz884x.c linux-3.0.7/drivers/net/ksz884x.c
31054 --- linux-3.0.7/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
31055 +++ linux-3.0.7/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
31056 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
31057 int rc;
31058 u64 counter[TOTAL_PORT_COUNTER_NUM];
31059
31060 + pax_track_stack();
31061 +
31062 mutex_lock(&hw_priv->lock);
31063 n = SWITCH_PORT_NUM;
31064 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31065 diff -urNp linux-3.0.7/drivers/net/mlx4/main.c linux-3.0.7/drivers/net/mlx4/main.c
31066 --- linux-3.0.7/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
31067 +++ linux-3.0.7/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
31068 @@ -40,6 +40,7 @@
31069 #include <linux/dma-mapping.h>
31070 #include <linux/slab.h>
31071 #include <linux/io-mapping.h>
31072 +#include <linux/sched.h>
31073
31074 #include <linux/mlx4/device.h>
31075 #include <linux/mlx4/doorbell.h>
31076 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
31077 u64 icm_size;
31078 int err;
31079
31080 + pax_track_stack();
31081 +
31082 err = mlx4_QUERY_FW(dev);
31083 if (err) {
31084 if (err == -EACCES)
31085 diff -urNp linux-3.0.7/drivers/net/niu.c linux-3.0.7/drivers/net/niu.c
31086 --- linux-3.0.7/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
31087 +++ linux-3.0.7/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
31088 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
31089 int i, num_irqs, err;
31090 u8 first_ldg;
31091
31092 + pax_track_stack();
31093 +
31094 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31095 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31096 ldg_num_map[i] = first_ldg + i;
31097 diff -urNp linux-3.0.7/drivers/net/pcnet32.c linux-3.0.7/drivers/net/pcnet32.c
31098 --- linux-3.0.7/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
31099 +++ linux-3.0.7/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
31100 @@ -82,7 +82,7 @@ static int cards_found;
31101 /*
31102 * VLB I/O addresses
31103 */
31104 -static unsigned int pcnet32_portlist[] __initdata =
31105 +static unsigned int pcnet32_portlist[] __devinitdata =
31106 { 0x300, 0x320, 0x340, 0x360, 0 };
31107
31108 static int pcnet32_debug;
31109 @@ -270,7 +270,7 @@ struct pcnet32_private {
31110 struct sk_buff **rx_skbuff;
31111 dma_addr_t *tx_dma_addr;
31112 dma_addr_t *rx_dma_addr;
31113 - struct pcnet32_access a;
31114 + struct pcnet32_access *a;
31115 spinlock_t lock; /* Guard lock */
31116 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31117 unsigned int rx_ring_size; /* current rx ring size */
31118 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31119 u16 val;
31120
31121 netif_wake_queue(dev);
31122 - val = lp->a.read_csr(ioaddr, CSR3);
31123 + val = lp->a->read_csr(ioaddr, CSR3);
31124 val &= 0x00ff;
31125 - lp->a.write_csr(ioaddr, CSR3, val);
31126 + lp->a->write_csr(ioaddr, CSR3, val);
31127 napi_enable(&lp->napi);
31128 }
31129
31130 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31131 r = mii_link_ok(&lp->mii_if);
31132 } else if (lp->chip_version >= PCNET32_79C970A) {
31133 ulong ioaddr = dev->base_addr; /* card base I/O address */
31134 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31135 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31136 } else { /* can not detect link on really old chips */
31137 r = 1;
31138 }
31139 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31140 pcnet32_netif_stop(dev);
31141
31142 spin_lock_irqsave(&lp->lock, flags);
31143 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31144 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31145
31146 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31147
31148 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31149 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31150 {
31151 struct pcnet32_private *lp = netdev_priv(dev);
31152 - struct pcnet32_access *a = &lp->a; /* access to registers */
31153 + struct pcnet32_access *a = lp->a; /* access to registers */
31154 ulong ioaddr = dev->base_addr; /* card base I/O address */
31155 struct sk_buff *skb; /* sk buff */
31156 int x, i; /* counters */
31157 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31158 pcnet32_netif_stop(dev);
31159
31160 spin_lock_irqsave(&lp->lock, flags);
31161 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31162 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31163
31164 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31165
31166 /* Reset the PCNET32 */
31167 - lp->a.reset(ioaddr);
31168 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31169 + lp->a->reset(ioaddr);
31170 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31171
31172 /* switch pcnet32 to 32bit mode */
31173 - lp->a.write_bcr(ioaddr, 20, 2);
31174 + lp->a->write_bcr(ioaddr, 20, 2);
31175
31176 /* purge & init rings but don't actually restart */
31177 pcnet32_restart(dev, 0x0000);
31178
31179 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31180 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31181
31182 /* Initialize Transmit buffers. */
31183 size = data_len + 15;
31184 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31185
31186 /* set int loopback in CSR15 */
31187 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31188 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31189 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31190
31191 teststatus = cpu_to_le16(0x8000);
31192 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31193 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31194
31195 /* Check status of descriptors */
31196 for (x = 0; x < numbuffs; x++) {
31197 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31198 }
31199 }
31200
31201 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31202 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31203 wmb();
31204 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31205 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31206 @@ -1015,7 +1015,7 @@ clean_up:
31207 pcnet32_restart(dev, CSR0_NORMAL);
31208 } else {
31209 pcnet32_purge_rx_ring(dev);
31210 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31211 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31212 }
31213 spin_unlock_irqrestore(&lp->lock, flags);
31214
31215 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31216 enum ethtool_phys_id_state state)
31217 {
31218 struct pcnet32_private *lp = netdev_priv(dev);
31219 - struct pcnet32_access *a = &lp->a;
31220 + struct pcnet32_access *a = lp->a;
31221 ulong ioaddr = dev->base_addr;
31222 unsigned long flags;
31223 int i;
31224 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31225 {
31226 int csr5;
31227 struct pcnet32_private *lp = netdev_priv(dev);
31228 - struct pcnet32_access *a = &lp->a;
31229 + struct pcnet32_access *a = lp->a;
31230 ulong ioaddr = dev->base_addr;
31231 int ticks;
31232
31233 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31234 spin_lock_irqsave(&lp->lock, flags);
31235 if (pcnet32_tx(dev)) {
31236 /* reset the chip to clear the error condition, then restart */
31237 - lp->a.reset(ioaddr);
31238 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31239 + lp->a->reset(ioaddr);
31240 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31241 pcnet32_restart(dev, CSR0_START);
31242 netif_wake_queue(dev);
31243 }
31244 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31245 __napi_complete(napi);
31246
31247 /* clear interrupt masks */
31248 - val = lp->a.read_csr(ioaddr, CSR3);
31249 + val = lp->a->read_csr(ioaddr, CSR3);
31250 val &= 0x00ff;
31251 - lp->a.write_csr(ioaddr, CSR3, val);
31252 + lp->a->write_csr(ioaddr, CSR3, val);
31253
31254 /* Set interrupt enable. */
31255 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31256 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31257
31258 spin_unlock_irqrestore(&lp->lock, flags);
31259 }
31260 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31261 int i, csr0;
31262 u16 *buff = ptr;
31263 struct pcnet32_private *lp = netdev_priv(dev);
31264 - struct pcnet32_access *a = &lp->a;
31265 + struct pcnet32_access *a = lp->a;
31266 ulong ioaddr = dev->base_addr;
31267 unsigned long flags;
31268
31269 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31270 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31271 if (lp->phymask & (1 << j)) {
31272 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31273 - lp->a.write_bcr(ioaddr, 33,
31274 + lp->a->write_bcr(ioaddr, 33,
31275 (j << 5) | i);
31276 - *buff++ = lp->a.read_bcr(ioaddr, 34);
31277 + *buff++ = lp->a->read_bcr(ioaddr, 34);
31278 }
31279 }
31280 }
31281 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31282 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31283 lp->options |= PCNET32_PORT_FD;
31284
31285 - lp->a = *a;
31286 + lp->a = a;
31287
31288 /* prior to register_netdev, dev->name is not yet correct */
31289 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31290 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31291 if (lp->mii) {
31292 /* lp->phycount and lp->phymask are set to 0 by memset above */
31293
31294 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31295 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31296 /* scan for PHYs */
31297 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31298 unsigned short id1, id2;
31299 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31300 pr_info("Found PHY %04x:%04x at address %d\n",
31301 id1, id2, i);
31302 }
31303 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31304 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31305 if (lp->phycount > 1)
31306 lp->options |= PCNET32_PORT_MII;
31307 }
31308 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31309 }
31310
31311 /* Reset the PCNET32 */
31312 - lp->a.reset(ioaddr);
31313 + lp->a->reset(ioaddr);
31314
31315 /* switch pcnet32 to 32bit mode */
31316 - lp->a.write_bcr(ioaddr, 20, 2);
31317 + lp->a->write_bcr(ioaddr, 20, 2);
31318
31319 netif_printk(lp, ifup, KERN_DEBUG, dev,
31320 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31321 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31322 (u32) (lp->init_dma_addr));
31323
31324 /* set/reset autoselect bit */
31325 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
31326 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
31327 if (lp->options & PCNET32_PORT_ASEL)
31328 val |= 2;
31329 - lp->a.write_bcr(ioaddr, 2, val);
31330 + lp->a->write_bcr(ioaddr, 2, val);
31331
31332 /* handle full duplex setting */
31333 if (lp->mii_if.full_duplex) {
31334 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
31335 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
31336 if (lp->options & PCNET32_PORT_FD) {
31337 val |= 1;
31338 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31339 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31340 if (lp->chip_version == 0x2627)
31341 val |= 3;
31342 }
31343 - lp->a.write_bcr(ioaddr, 9, val);
31344 + lp->a->write_bcr(ioaddr, 9, val);
31345 }
31346
31347 /* set/reset GPSI bit in test register */
31348 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31349 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31350 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31351 val |= 0x10;
31352 - lp->a.write_csr(ioaddr, 124, val);
31353 + lp->a->write_csr(ioaddr, 124, val);
31354
31355 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31356 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31357 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31358 * duplex, and/or enable auto negotiation, and clear DANAS
31359 */
31360 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31361 - lp->a.write_bcr(ioaddr, 32,
31362 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
31363 + lp->a->write_bcr(ioaddr, 32,
31364 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
31365 /* disable Auto Negotiation, set 10Mpbs, HD */
31366 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31367 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31368 if (lp->options & PCNET32_PORT_FD)
31369 val |= 0x10;
31370 if (lp->options & PCNET32_PORT_100)
31371 val |= 0x08;
31372 - lp->a.write_bcr(ioaddr, 32, val);
31373 + lp->a->write_bcr(ioaddr, 32, val);
31374 } else {
31375 if (lp->options & PCNET32_PORT_ASEL) {
31376 - lp->a.write_bcr(ioaddr, 32,
31377 - lp->a.read_bcr(ioaddr,
31378 + lp->a->write_bcr(ioaddr, 32,
31379 + lp->a->read_bcr(ioaddr,
31380 32) | 0x0080);
31381 /* enable auto negotiate, setup, disable fd */
31382 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31383 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31384 val |= 0x20;
31385 - lp->a.write_bcr(ioaddr, 32, val);
31386 + lp->a->write_bcr(ioaddr, 32, val);
31387 }
31388 }
31389 } else {
31390 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31391 * There is really no good other way to handle multiple PHYs
31392 * other than turning off all automatics
31393 */
31394 - val = lp->a.read_bcr(ioaddr, 2);
31395 - lp->a.write_bcr(ioaddr, 2, val & ~2);
31396 - val = lp->a.read_bcr(ioaddr, 32);
31397 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31398 + val = lp->a->read_bcr(ioaddr, 2);
31399 + lp->a->write_bcr(ioaddr, 2, val & ~2);
31400 + val = lp->a->read_bcr(ioaddr, 32);
31401 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31402
31403 if (!(lp->options & PCNET32_PORT_ASEL)) {
31404 /* setup ecmd */
31405 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31406 ethtool_cmd_speed_set(&ecmd,
31407 (lp->options & PCNET32_PORT_100) ?
31408 SPEED_100 : SPEED_10);
31409 - bcr9 = lp->a.read_bcr(ioaddr, 9);
31410 + bcr9 = lp->a->read_bcr(ioaddr, 9);
31411
31412 if (lp->options & PCNET32_PORT_FD) {
31413 ecmd.duplex = DUPLEX_FULL;
31414 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31415 ecmd.duplex = DUPLEX_HALF;
31416 bcr9 |= ~(1 << 0);
31417 }
31418 - lp->a.write_bcr(ioaddr, 9, bcr9);
31419 + lp->a->write_bcr(ioaddr, 9, bcr9);
31420 }
31421
31422 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31423 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31424
31425 #ifdef DO_DXSUFLO
31426 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31427 - val = lp->a.read_csr(ioaddr, CSR3);
31428 + val = lp->a->read_csr(ioaddr, CSR3);
31429 val |= 0x40;
31430 - lp->a.write_csr(ioaddr, CSR3, val);
31431 + lp->a->write_csr(ioaddr, CSR3, val);
31432 }
31433 #endif
31434
31435 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31436 napi_enable(&lp->napi);
31437
31438 /* Re-initialize the PCNET32, and start it when done. */
31439 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31440 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31441 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31442 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31443
31444 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31445 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31446 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31447 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31448
31449 netif_start_queue(dev);
31450
31451 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31452
31453 i = 0;
31454 while (i++ < 100)
31455 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31456 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31457 break;
31458 /*
31459 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31460 * reports that doing so triggers a bug in the '974.
31461 */
31462 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31463 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31464
31465 netif_printk(lp, ifup, KERN_DEBUG, dev,
31466 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31467 i,
31468 (u32) (lp->init_dma_addr),
31469 - lp->a.read_csr(ioaddr, CSR0));
31470 + lp->a->read_csr(ioaddr, CSR0));
31471
31472 spin_unlock_irqrestore(&lp->lock, flags);
31473
31474 @@ -2218,7 +2218,7 @@ err_free_ring:
31475 * Switch back to 16bit mode to avoid problems with dumb
31476 * DOS packet driver after a warm reboot
31477 */
31478 - lp->a.write_bcr(ioaddr, 20, 4);
31479 + lp->a->write_bcr(ioaddr, 20, 4);
31480
31481 err_free_irq:
31482 spin_unlock_irqrestore(&lp->lock, flags);
31483 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
31484
31485 /* wait for stop */
31486 for (i = 0; i < 100; i++)
31487 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
31488 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
31489 break;
31490
31491 if (i >= 100)
31492 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
31493 return;
31494
31495 /* ReInit Ring */
31496 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31497 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31498 i = 0;
31499 while (i++ < 1000)
31500 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31501 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31502 break;
31503
31504 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
31505 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
31506 }
31507
31508 static void pcnet32_tx_timeout(struct net_device *dev)
31509 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
31510 /* Transmitter timeout, serious problems. */
31511 if (pcnet32_debug & NETIF_MSG_DRV)
31512 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
31513 - dev->name, lp->a.read_csr(ioaddr, CSR0));
31514 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31515 + dev->name, lp->a->read_csr(ioaddr, CSR0));
31516 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31517 dev->stats.tx_errors++;
31518 if (netif_msg_tx_err(lp)) {
31519 int i;
31520 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31521
31522 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
31523 "%s() called, csr0 %4.4x\n",
31524 - __func__, lp->a.read_csr(ioaddr, CSR0));
31525 + __func__, lp->a->read_csr(ioaddr, CSR0));
31526
31527 /* Default status -- will not enable Successful-TxDone
31528 * interrupt when that option is available to us.
31529 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31530 dev->stats.tx_bytes += skb->len;
31531
31532 /* Trigger an immediate send poll. */
31533 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31534 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31535
31536 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
31537 lp->tx_full = 1;
31538 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
31539
31540 spin_lock(&lp->lock);
31541
31542 - csr0 = lp->a.read_csr(ioaddr, CSR0);
31543 + csr0 = lp->a->read_csr(ioaddr, CSR0);
31544 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
31545 if (csr0 == 0xffff)
31546 break; /* PCMCIA remove happened */
31547 /* Acknowledge all of the current interrupt sources ASAP. */
31548 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31549 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31550
31551 netif_printk(lp, intr, KERN_DEBUG, dev,
31552 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
31553 - csr0, lp->a.read_csr(ioaddr, CSR0));
31554 + csr0, lp->a->read_csr(ioaddr, CSR0));
31555
31556 /* Log misc errors. */
31557 if (csr0 & 0x4000)
31558 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
31559 if (napi_schedule_prep(&lp->napi)) {
31560 u16 val;
31561 /* set interrupt masks */
31562 - val = lp->a.read_csr(ioaddr, CSR3);
31563 + val = lp->a->read_csr(ioaddr, CSR3);
31564 val |= 0x5f00;
31565 - lp->a.write_csr(ioaddr, CSR3, val);
31566 + lp->a->write_csr(ioaddr, CSR3, val);
31567
31568 __napi_schedule(&lp->napi);
31569 break;
31570 }
31571 - csr0 = lp->a.read_csr(ioaddr, CSR0);
31572 + csr0 = lp->a->read_csr(ioaddr, CSR0);
31573 }
31574
31575 netif_printk(lp, intr, KERN_DEBUG, dev,
31576 "exiting interrupt, csr0=%#4.4x\n",
31577 - lp->a.read_csr(ioaddr, CSR0));
31578 + lp->a->read_csr(ioaddr, CSR0));
31579
31580 spin_unlock(&lp->lock);
31581
31582 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
31583
31584 spin_lock_irqsave(&lp->lock, flags);
31585
31586 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31587 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31588
31589 netif_printk(lp, ifdown, KERN_DEBUG, dev,
31590 "Shutting down ethercard, status was %2.2x\n",
31591 - lp->a.read_csr(ioaddr, CSR0));
31592 + lp->a->read_csr(ioaddr, CSR0));
31593
31594 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
31595 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31596 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31597
31598 /*
31599 * Switch back to 16bit mode to avoid problems with dumb
31600 * DOS packet driver after a warm reboot
31601 */
31602 - lp->a.write_bcr(ioaddr, 20, 4);
31603 + lp->a->write_bcr(ioaddr, 20, 4);
31604
31605 spin_unlock_irqrestore(&lp->lock, flags);
31606
31607 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
31608 unsigned long flags;
31609
31610 spin_lock_irqsave(&lp->lock, flags);
31611 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31612 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31613 spin_unlock_irqrestore(&lp->lock, flags);
31614
31615 return &dev->stats;
31616 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
31617 if (dev->flags & IFF_ALLMULTI) {
31618 ib->filter[0] = cpu_to_le32(~0U);
31619 ib->filter[1] = cpu_to_le32(~0U);
31620 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31621 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31622 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31623 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31624 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31625 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31626 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31627 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31628 return;
31629 }
31630 /* clear the multicast filter */
31631 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
31632 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
31633 }
31634 for (i = 0; i < 4; i++)
31635 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
31636 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
31637 le16_to_cpu(mcast_table[i]));
31638 }
31639
31640 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
31641
31642 spin_lock_irqsave(&lp->lock, flags);
31643 suspended = pcnet32_suspend(dev, &flags, 0);
31644 - csr15 = lp->a.read_csr(ioaddr, CSR15);
31645 + csr15 = lp->a->read_csr(ioaddr, CSR15);
31646 if (dev->flags & IFF_PROMISC) {
31647 /* Log any net taps. */
31648 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
31649 lp->init_block->mode =
31650 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
31651 7);
31652 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
31653 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
31654 } else {
31655 lp->init_block->mode =
31656 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
31657 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31658 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31659 pcnet32_load_multicast(dev);
31660 }
31661
31662 if (suspended) {
31663 int csr5;
31664 /* clear SUSPEND (SPND) - CSR5 bit 0 */
31665 - csr5 = lp->a.read_csr(ioaddr, CSR5);
31666 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31667 + csr5 = lp->a->read_csr(ioaddr, CSR5);
31668 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31669 } else {
31670 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31671 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31672 pcnet32_restart(dev, CSR0_NORMAL);
31673 netif_wake_queue(dev);
31674 }
31675 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
31676 if (!lp->mii)
31677 return 0;
31678
31679 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31680 - val_out = lp->a.read_bcr(ioaddr, 34);
31681 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31682 + val_out = lp->a->read_bcr(ioaddr, 34);
31683
31684 return val_out;
31685 }
31686 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
31687 if (!lp->mii)
31688 return;
31689
31690 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31691 - lp->a.write_bcr(ioaddr, 34, val);
31692 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31693 + lp->a->write_bcr(ioaddr, 34, val);
31694 }
31695
31696 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
31697 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
31698 curr_link = mii_link_ok(&lp->mii_if);
31699 } else {
31700 ulong ioaddr = dev->base_addr; /* card base I/O address */
31701 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31702 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31703 }
31704 if (!curr_link) {
31705 if (prev_link || verbose) {
31706 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
31707 (ecmd.duplex == DUPLEX_FULL)
31708 ? "full" : "half");
31709 }
31710 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
31711 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
31712 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
31713 if (lp->mii_if.full_duplex)
31714 bcr9 |= (1 << 0);
31715 else
31716 bcr9 &= ~(1 << 0);
31717 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
31718 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
31719 }
31720 } else {
31721 netif_info(lp, link, dev, "link up\n");
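
The pcnet32 change is mechanical but large: instead of copying the whole pcnet32_access table of register accessors into each device's private struct (which forces that copy to be writable), the private struct now keeps a pointer to the shared table, so every lp->a.read_csr(...) becomes lp->a->read_csr(...) and the table itself can stay read-only under constification. A compilable sketch of the before/after shape, with made-up accessor names:

#include <stdio.h>

struct access_ops {
        unsigned int (*read_csr)(unsigned long ioaddr, int reg);
};

static unsigned int fake_read_csr(unsigned long ioaddr, int reg)
{
        return (unsigned int)(ioaddr + reg);          /* stand-in for real MMIO */
}

/* Shared accessor table (the driver has one per chip access method). */
static const struct access_ops pcnet32_like_ops = {
        .read_csr = fake_read_csr,
};

struct priv {
        const struct access_ops *a;     /* was: struct access_ops a; (a full copy) */
};

int main(void)
{
        struct priv lp = { .a = &pcnet32_like_ops };  /* was: lp.a = *ops; */

        printf("csr0 = %u\n", lp.a->read_csr(0x300, 0));
        return 0;
}
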
31722 diff -urNp linux-3.0.7/drivers/net/ppp_generic.c linux-3.0.7/drivers/net/ppp_generic.c
31723 --- linux-3.0.7/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
31724 +++ linux-3.0.7/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
31725 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
31726 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
31727 struct ppp_stats stats;
31728 struct ppp_comp_stats cstats;
31729 - char *vers;
31730
31731 switch (cmd) {
31732 case SIOCGPPPSTATS:
31733 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
31734 break;
31735
31736 case SIOCGPPPVER:
31737 - vers = PPP_VERSION;
31738 - if (copy_to_user(addr, vers, strlen(vers) + 1))
31739 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
31740 break;
31741 err = 0;
31742 break;
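
The SIOCGPPPVER hunk drops the intermediate vers pointer and copies sizeof(PPP_VERSION) bytes of the string literal directly; for a string literal, sizeof is the length plus the terminating NUL, so the behaviour matches the old strlen(vers) + 1 while the length becomes a compile-time constant. A two-line check of that identity; the macro value shown is only illustrative.

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"   /* same macro name as the driver; value illustrative */

int main(void)
{
        /* prints strlen=5 sizeof=6 for the value above */
        printf("strlen=%zu sizeof=%zu\n",
               strlen(PPP_VERSION), sizeof(PPP_VERSION));
        return 0;
}
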
31743 diff -urNp linux-3.0.7/drivers/net/r8169.c linux-3.0.7/drivers/net/r8169.c
31744 --- linux-3.0.7/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
31745 +++ linux-3.0.7/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
31746 @@ -645,12 +645,12 @@ struct rtl8169_private {
31747 struct mdio_ops {
31748 void (*write)(void __iomem *, int, int);
31749 int (*read)(void __iomem *, int);
31750 - } mdio_ops;
31751 + } __no_const mdio_ops;
31752
31753 struct pll_power_ops {
31754 void (*down)(struct rtl8169_private *);
31755 void (*up)(struct rtl8169_private *);
31756 - } pll_power_ops;
31757 + } __no_const pll_power_ops;
31758
31759 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
31760 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
31761 diff -urNp linux-3.0.7/drivers/net/sis190.c linux-3.0.7/drivers/net/sis190.c
31762 --- linux-3.0.7/drivers/net/sis190.c 2011-09-02 18:11:21.000000000 -0400
31763 +++ linux-3.0.7/drivers/net/sis190.c 2011-10-11 10:44:33.000000000 -0400
31764 @@ -1623,7 +1623,7 @@ static int __devinit sis190_get_mac_addr
31765 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
31766 struct net_device *dev)
31767 {
31768 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
31769 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
31770 struct sis190_private *tp = netdev_priv(dev);
31771 struct pci_dev *isa_bridge;
31772 u8 reg, tmp8;
31773 diff -urNp linux-3.0.7/drivers/net/sundance.c linux-3.0.7/drivers/net/sundance.c
31774 --- linux-3.0.7/drivers/net/sundance.c 2011-07-21 22:17:23.000000000 -0400
31775 +++ linux-3.0.7/drivers/net/sundance.c 2011-10-11 10:44:33.000000000 -0400
31776 @@ -218,7 +218,7 @@ enum {
31777 struct pci_id_info {
31778 const char *name;
31779 };
31780 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31781 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31782 {"D-Link DFE-550TX FAST Ethernet Adapter"},
31783 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
31784 {"D-Link DFE-580TX 4 port Server Adapter"},
31785 diff -urNp linux-3.0.7/drivers/net/tg3.h linux-3.0.7/drivers/net/tg3.h
31786 --- linux-3.0.7/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
31787 +++ linux-3.0.7/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
31788 @@ -134,6 +134,7 @@
31789 #define CHIPREV_ID_5750_A0 0x4000
31790 #define CHIPREV_ID_5750_A1 0x4001
31791 #define CHIPREV_ID_5750_A3 0x4003
31792 +#define CHIPREV_ID_5750_C1 0x4201
31793 #define CHIPREV_ID_5750_C2 0x4202
31794 #define CHIPREV_ID_5752_A0_HW 0x5000
31795 #define CHIPREV_ID_5752_A0 0x6000
31796 diff -urNp linux-3.0.7/drivers/net/tokenring/abyss.c linux-3.0.7/drivers/net/tokenring/abyss.c
31797 --- linux-3.0.7/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
31798 +++ linux-3.0.7/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
31799 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
31800
31801 static int __init abyss_init (void)
31802 {
31803 - abyss_netdev_ops = tms380tr_netdev_ops;
31804 + pax_open_kernel();
31805 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31806
31807 - abyss_netdev_ops.ndo_open = abyss_open;
31808 - abyss_netdev_ops.ndo_stop = abyss_close;
31809 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
31810 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
31811 + pax_close_kernel();
31812
31813 return pci_register_driver(&abyss_driver);
31814 }
31815 diff -urNp linux-3.0.7/drivers/net/tokenring/madgemc.c linux-3.0.7/drivers/net/tokenring/madgemc.c
31816 --- linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
31817 +++ linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
31818 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
31819
31820 static int __init madgemc_init (void)
31821 {
31822 - madgemc_netdev_ops = tms380tr_netdev_ops;
31823 - madgemc_netdev_ops.ndo_open = madgemc_open;
31824 - madgemc_netdev_ops.ndo_stop = madgemc_close;
31825 + pax_open_kernel();
31826 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31827 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
31828 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
31829 + pax_close_kernel();
31830
31831 return mca_register_driver (&madgemc_driver);
31832 }
31833 diff -urNp linux-3.0.7/drivers/net/tokenring/proteon.c linux-3.0.7/drivers/net/tokenring/proteon.c
31834 --- linux-3.0.7/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
31835 +++ linux-3.0.7/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
31836 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
31837 struct platform_device *pdev;
31838 int i, num = 0, err = 0;
31839
31840 - proteon_netdev_ops = tms380tr_netdev_ops;
31841 - proteon_netdev_ops.ndo_open = proteon_open;
31842 - proteon_netdev_ops.ndo_stop = tms380tr_close;
31843 + pax_open_kernel();
31844 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31845 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
31846 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
31847 + pax_close_kernel();
31848
31849 err = platform_driver_register(&proteon_driver);
31850 if (err)
31851 diff -urNp linux-3.0.7/drivers/net/tokenring/skisa.c linux-3.0.7/drivers/net/tokenring/skisa.c
31852 --- linux-3.0.7/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
31853 +++ linux-3.0.7/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
31854 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
31855 struct platform_device *pdev;
31856 int i, num = 0, err = 0;
31857
31858 - sk_isa_netdev_ops = tms380tr_netdev_ops;
31859 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
31860 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31861 + pax_open_kernel();
31862 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31863 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
31864 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31865 + pax_close_kernel();
31866
31867 err = platform_driver_register(&sk_isa_driver);
31868 if (err)
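
The four token-ring init functions (abyss, madgemc, proteon, skisa) are rewritten the same way: the driver's net_device_ops is cloned from tms380tr_netdev_ops and two callbacks are overridden, but because constification leaves the ops object read-only the writes are bracketed by pax_open_kernel()/pax_close_kernel(), PaX primitives that briefly permit writes to otherwise read-only kernel data, and go through *(void **) casts to get past the const qualifier. Below is a userspace sketch of the copy-then-patch sequence only; the object is left writable here precisely because the sketch has no pax_open_kernel(), and the function names are invented.

#include <stdio.h>
#include <string.h>

struct netdev_ops {
        int (*ndo_open)(void);
        int (*ndo_stop)(void);
};

static int generic_open(void)    { return 1; }
static int generic_stop(void)    { return 2; }
static int abyss_like_open(void) { return 3; }

/* Shared template, analogous to tms380tr_netdev_ops. */
static const struct netdev_ops template_ops = {
        .ndo_open = generic_open,
        .ndo_stop = generic_stop,
};

/* In the patched kernel the per-driver copy is read-only, which is why the
 * writes below sit between pax_open_kernel()/pax_close_kernel(); it stays
 * writable here so the sketch runs as plain userspace C. */
static struct netdev_ops driver_ops;

int main(void)
{
        /* pax_open_kernel();   lift write protection in the real code */
        memcpy(&driver_ops, &template_ops, sizeof(template_ops));
        *(void **)&driver_ops.ndo_open = (void *)abyss_like_open; /* override one hook */
        /* pax_close_kernel();  restore write protection */

        printf("open -> %d, stop -> %d\n",
               driver_ops.ndo_open(), driver_ops.ndo_stop());
        return 0;
}
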
31869 diff -urNp linux-3.0.7/drivers/net/tulip/de2104x.c linux-3.0.7/drivers/net/tulip/de2104x.c
31870 --- linux-3.0.7/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
31871 +++ linux-3.0.7/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
31872 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
31873 struct de_srom_info_leaf *il;
31874 void *bufp;
31875
31876 + pax_track_stack();
31877 +
31878 /* download entire eeprom */
31879 for (i = 0; i < DE_EEPROM_WORDS; i++)
31880 ((__le16 *)ee_data)[i] =
31881 diff -urNp linux-3.0.7/drivers/net/tulip/de4x5.c linux-3.0.7/drivers/net/tulip/de4x5.c
31882 --- linux-3.0.7/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
31883 +++ linux-3.0.7/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
31884 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
31885 for (i=0; i<ETH_ALEN; i++) {
31886 tmp.addr[i] = dev->dev_addr[i];
31887 }
31888 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31889 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31890 break;
31891
31892 case DE4X5_SET_HWADDR: /* Set the hardware address */
31893 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
31894 spin_lock_irqsave(&lp->lock, flags);
31895 memcpy(&statbuf, &lp->pktStats, ioc->len);
31896 spin_unlock_irqrestore(&lp->lock, flags);
31897 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
31898 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
31899 return -EFAULT;
31900 break;
31901 }
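
The two de4x5 ioctl fixes address the same bug class: ioc->len comes from userspace and was passed straight to copy_to_user() against a fixed-size kernel buffer, so an oversized length would leak adjacent kernel memory. The added "ioc->len > sizeof ..." checks reject such requests before any copy happens. A small sketch of the guarded-copy pattern, with plain memcpy standing in for copy_to_user and hypothetical names throughout:

#include <stdio.h>
#include <string.h>

struct ioctl_req {
        void   *data;   /* user-supplied destination */
        size_t  len;    /* user-supplied length */
};

/* Copy at most src_size bytes; reject anything larger, exactly like the
 * added "ioc->len > sizeof tmp.addr" guard. */
static int reply_to_user(const struct ioctl_req *ioc,
                         const void *src_buf, size_t src_size)
{
        if (ioc->len > src_size)
                return -1;                        /* -EFAULT in the driver */
        memcpy(ioc->data, src_buf, ioc->len);     /* copy_to_user() in-kernel */
        return 0;
}

int main(void)
{
        unsigned char hwaddr[6] = { 0, 1, 2, 3, 4, 5 };
        unsigned char out[64];
        struct ioctl_req ok  = { out, sizeof(hwaddr) };
        struct ioctl_req bad = { out, sizeof(out) };   /* asks for too much */

        printf("ok:  %d\n", reply_to_user(&ok,  hwaddr, sizeof(hwaddr)));
        printf("bad: %d\n", reply_to_user(&bad, hwaddr, sizeof(hwaddr)));
        return 0;
}
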
31902 diff -urNp linux-3.0.7/drivers/net/tulip/eeprom.c linux-3.0.7/drivers/net/tulip/eeprom.c
31903 --- linux-3.0.7/drivers/net/tulip/eeprom.c 2011-07-21 22:17:23.000000000 -0400
31904 +++ linux-3.0.7/drivers/net/tulip/eeprom.c 2011-10-11 10:44:33.000000000 -0400
31905 @@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
31906 {NULL}};
31907
31908
31909 -static const char *block_name[] __devinitdata = {
31910 +static const char *block_name[] __devinitconst = {
31911 "21140 non-MII",
31912 "21140 MII PHY",
31913 "21142 Serial PHY",
31914 diff -urNp linux-3.0.7/drivers/net/tulip/winbond-840.c linux-3.0.7/drivers/net/tulip/winbond-840.c
31915 --- linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-07-21 22:17:23.000000000 -0400
31916 +++ linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-10-11 10:44:33.000000000 -0400
31917 @@ -236,7 +236,7 @@ struct pci_id_info {
31918 int drv_flags; /* Driver use, intended as capability flags. */
31919 };
31920
31921 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31922 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31923 { /* Sometime a Level-One switch card. */
31924 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
31925 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
31926 diff -urNp linux-3.0.7/drivers/net/usb/hso.c linux-3.0.7/drivers/net/usb/hso.c
31927 --- linux-3.0.7/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
31928 +++ linux-3.0.7/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
31929 @@ -71,7 +71,7 @@
31930 #include <asm/byteorder.h>
31931 #include <linux/serial_core.h>
31932 #include <linux/serial.h>
31933 -
31934 +#include <asm/local.h>
31935
31936 #define MOD_AUTHOR "Option Wireless"
31937 #define MOD_DESCRIPTION "USB High Speed Option driver"
31938 @@ -257,7 +257,7 @@ struct hso_serial {
31939
31940 /* from usb_serial_port */
31941 struct tty_struct *tty;
31942 - int open_count;
31943 + local_t open_count;
31944 spinlock_t serial_lock;
31945
31946 int (*write_data) (struct hso_serial *serial);
31947 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
31948 struct urb *urb;
31949
31950 urb = serial->rx_urb[0];
31951 - if (serial->open_count > 0) {
31952 + if (local_read(&serial->open_count) > 0) {
31953 count = put_rxbuf_data(urb, serial);
31954 if (count == -1)
31955 return;
31956 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
31957 DUMP1(urb->transfer_buffer, urb->actual_length);
31958
31959 /* Anyone listening? */
31960 - if (serial->open_count == 0)
31961 + if (local_read(&serial->open_count) == 0)
31962 return;
31963
31964 if (status == 0) {
31965 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
31966 spin_unlock_irq(&serial->serial_lock);
31967
31968 /* check for port already opened, if not set the termios */
31969 - serial->open_count++;
31970 - if (serial->open_count == 1) {
31971 + if (local_inc_return(&serial->open_count) == 1) {
31972 serial->rx_state = RX_IDLE;
31973 /* Force default termio settings */
31974 _hso_serial_set_termios(tty, NULL);
31975 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
31976 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
31977 if (result) {
31978 hso_stop_serial_device(serial->parent);
31979 - serial->open_count--;
31980 + local_dec(&serial->open_count);
31981 kref_put(&serial->parent->ref, hso_serial_ref_free);
31982 }
31983 } else {
31984 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
31985
31986 /* reset the rts and dtr */
31987 /* do the actual close */
31988 - serial->open_count--;
31989 + local_dec(&serial->open_count);
31990
31991 - if (serial->open_count <= 0) {
31992 - serial->open_count = 0;
31993 + if (local_read(&serial->open_count) <= 0) {
31994 + local_set(&serial->open_count, 0);
31995 spin_lock_irq(&serial->serial_lock);
31996 if (serial->tty == tty) {
31997 serial->tty->driver_data = NULL;
31998 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
31999
32000 /* the actual setup */
32001 spin_lock_irqsave(&serial->serial_lock, flags);
32002 - if (serial->open_count)
32003 + if (local_read(&serial->open_count))
32004 _hso_serial_set_termios(tty, old);
32005 else
32006 tty->termios = old;
32007 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32008 D1("Pending read interrupt on port %d\n", i);
32009 spin_lock(&serial->serial_lock);
32010 if (serial->rx_state == RX_IDLE &&
32011 - serial->open_count > 0) {
32012 + local_read(&serial->open_count) > 0) {
32013 /* Setup and send a ctrl req read on
32014 * port i */
32015 if (!serial->rx_urb_filled[0]) {
32016 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32017 /* Start all serial ports */
32018 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32019 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32020 - if (dev2ser(serial_table[i])->open_count) {
32021 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32022 result =
32023 hso_start_serial_device(serial_table[i], GFP_NOIO);
32024 hso_kick_transmit(dev2ser(serial_table[i]));
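
The hso hunks replace the plain int open_count with a local_t and route every update through local_inc_return()/local_dec()/local_read()/local_set() from <asm/local.h> (the include the patch adds), so the counter is only touched through explicit primitives rather than bare ++/--, in line with the reference-counter hardening done elsewhere in this tree. A userspace approximation of the open/close flow using C11 atomics in place of local_t; the kernel type is a cheaper per-CPU-oriented primitive, so this is only an analogy.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;   /* stands in for local_t serial->open_count */

static void serial_open(void)
{
        /* local_inc_return(&serial->open_count) == 1  -> first opener */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                puts("first open: initialise port");
        else
                puts("subsequent open");
}

static void serial_close(void)
{
        /* local_dec(), then clamp at zero like the patched hso_serial_close() */
        atomic_fetch_sub(&open_count, 1);
        if (atomic_load(&open_count) <= 0) {
                atomic_store(&open_count, 0);    /* local_set(&..., 0) */
                puts("last close: tear down port");
        }
}

int main(void)
{
        serial_open();
        serial_open();
        serial_close();
        serial_close();
        return 0;
}
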
32025 diff -urNp linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c
32026 --- linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
32027 +++ linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
32028 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
32029 * Return with error code if any of the queue indices
32030 * is out of range
32031 */
32032 - if (p->ring_index[i] < 0 ||
32033 - p->ring_index[i] >= adapter->num_rx_queues)
32034 + if (p->ring_index[i] >= adapter->num_rx_queues)
32035 return -EINVAL;
32036 }
32037
32038 diff -urNp linux-3.0.7/drivers/net/vxge/vxge-config.h linux-3.0.7/drivers/net/vxge/vxge-config.h
32039 --- linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
32040 +++ linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
32041 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
32042 void (*link_down)(struct __vxge_hw_device *devh);
32043 void (*crit_err)(struct __vxge_hw_device *devh,
32044 enum vxge_hw_event type, u64 ext_data);
32045 -};
32046 +} __no_const;
32047
32048 /*
32049 * struct __vxge_hw_blockpool_entry - Block private data structure
32050 diff -urNp linux-3.0.7/drivers/net/vxge/vxge-main.c linux-3.0.7/drivers/net/vxge/vxge-main.c
32051 --- linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
32052 +++ linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
32053 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32054 struct sk_buff *completed[NR_SKB_COMPLETED];
32055 int more;
32056
32057 + pax_track_stack();
32058 +
32059 do {
32060 more = 0;
32061 skb_ptr = completed;
32062 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
32063 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32064 int index;
32065
32066 + pax_track_stack();
32067 +
32068 /*
32069 * Filling
32070 * - itable with bucket numbers
32071 diff -urNp linux-3.0.7/drivers/net/vxge/vxge-traffic.h linux-3.0.7/drivers/net/vxge/vxge-traffic.h
32072 --- linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
32073 +++ linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
32074 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32075 struct vxge_hw_mempool_dma *dma_object,
32076 u32 index,
32077 u32 is_last);
32078 -};
32079 +} __no_const;
32080
32081 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32082 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32083 diff -urNp linux-3.0.7/drivers/net/wan/cycx_x25.c linux-3.0.7/drivers/net/wan/cycx_x25.c
32084 --- linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
32085 +++ linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
32086 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
32087 unsigned char hex[1024],
32088 * phex = hex;
32089
32090 + pax_track_stack();
32091 +
32092 if (len >= (sizeof(hex) / 2))
32093 len = (sizeof(hex) / 2) - 1;
32094
32095 diff -urNp linux-3.0.7/drivers/net/wan/hdlc_x25.c linux-3.0.7/drivers/net/wan/hdlc_x25.c
32096 --- linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
32097 +++ linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
32098 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32099
32100 static int x25_open(struct net_device *dev)
32101 {
32102 - struct lapb_register_struct cb;
32103 + static struct lapb_register_struct cb = {
32104 + .connect_confirmation = x25_connected,
32105 + .connect_indication = x25_connected,
32106 + .disconnect_confirmation = x25_disconnected,
32107 + .disconnect_indication = x25_disconnected,
32108 + .data_indication = x25_data_indication,
32109 + .data_transmit = x25_data_transmit
32110 + };
32111 int result;
32112
32113 - cb.connect_confirmation = x25_connected;
32114 - cb.connect_indication = x25_connected;
32115 - cb.disconnect_confirmation = x25_disconnected;
32116 - cb.disconnect_indication = x25_disconnected;
32117 - cb.data_indication = x25_data_indication;
32118 - cb.data_transmit = x25_data_transmit;
32119 -
32120 result = lapb_register(dev, &cb);
32121 if (result != LAPB_OK)
32122 return result;
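
The x25_open() change replaces six field assignments into an on-stack lapb_register_struct with a function-local static built from a designated initializer, so the callback table is constructed once at build time instead of on every open and, as static data, becomes a candidate for read-only placement. A small illustration of that idiom; const is added in the sketch to make the read-only intent explicit, and the names are invented.

#include <stdio.h>

struct callbacks {
        void (*connected)(void);
        void (*disconnected)(void);
};

static void on_connect(void)    { puts("connected"); }
static void on_disconnect(void) { puts("disconnected"); }

static const struct callbacks *get_callbacks(void)
{
        /* Built once, at program load, instead of field by field on each call. */
        static const struct callbacks cb = {
                .connected    = on_connect,
                .disconnected = on_disconnect,
        };
        return &cb;
}

int main(void)
{
        get_callbacks()->connected();
        get_callbacks()->disconnected();
        return 0;
}
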
32123 diff -urNp linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c
32124 --- linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
32125 +++ linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
32126 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32127 int do_autopm = 1;
32128 DECLARE_COMPLETION_ONSTACK(notif_completion);
32129
32130 + pax_track_stack();
32131 +
32132 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32133 i2400m, ack, ack_size);
32134 BUG_ON(_ack == i2400m->bm_ack_buf);
32135 diff -urNp linux-3.0.7/drivers/net/wireless/airo.c linux-3.0.7/drivers/net/wireless/airo.c
32136 --- linux-3.0.7/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
32137 +++ linux-3.0.7/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
32138 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32139 BSSListElement * loop_net;
32140 BSSListElement * tmp_net;
32141
32142 + pax_track_stack();
32143 +
32144 /* Blow away current list of scan results */
32145 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32146 list_move_tail (&loop_net->list, &ai->network_free_list);
32147 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32148 WepKeyRid wkr;
32149 int rc;
32150
32151 + pax_track_stack();
32152 +
32153 memset( &mySsid, 0, sizeof( mySsid ) );
32154 kfree (ai->flash);
32155 ai->flash = NULL;
32156 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32157 __le32 *vals = stats.vals;
32158 int len;
32159
32160 + pax_track_stack();
32161 +
32162 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32163 return -ENOMEM;
32164 data = file->private_data;
32165 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32166 /* If doLoseSync is not 1, we won't do a Lose Sync */
32167 int doLoseSync = -1;
32168
32169 + pax_track_stack();
32170 +
32171 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32172 return -ENOMEM;
32173 data = file->private_data;
32174 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32175 int i;
32176 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32177
32178 + pax_track_stack();
32179 +
32180 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32181 if (!qual)
32182 return -ENOMEM;
32183 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32184 CapabilityRid cap_rid;
32185 __le32 *vals = stats_rid.vals;
32186
32187 + pax_track_stack();
32188 +
32189 /* Get stats out of the card */
32190 clear_bit(JOB_WSTATS, &local->jobs);
32191 if (local->power.event) {
32192 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c
32193 --- linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
32194 +++ linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
32195 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
32196 unsigned int v;
32197 u64 tsf;
32198
32199 + pax_track_stack();
32200 +
32201 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32202 len += snprintf(buf+len, sizeof(buf)-len,
32203 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32204 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
32205 unsigned int len = 0;
32206 unsigned int i;
32207
32208 + pax_track_stack();
32209 +
32210 len += snprintf(buf+len, sizeof(buf)-len,
32211 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32212
32213 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
32214 unsigned int i;
32215 unsigned int v;
32216
32217 + pax_track_stack();
32218 +
32219 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
32220 sc->ah->ah_ant_mode);
32221 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
32222 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
32223 unsigned int len = 0;
32224 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
32225
32226 + pax_track_stack();
32227 +
32228 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
32229 sc->bssidmask);
32230 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
32231 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
32232 unsigned int len = 0;
32233 int i;
32234
32235 + pax_track_stack();
32236 +
32237 len += snprintf(buf+len, sizeof(buf)-len,
32238 "RX\n---------------------\n");
32239 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
32240 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
32241 char buf[700];
32242 unsigned int len = 0;
32243
32244 + pax_track_stack();
32245 +
32246 len += snprintf(buf+len, sizeof(buf)-len,
32247 "HW has PHY error counters:\t%s\n",
32248 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
32249 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32250 struct ath5k_buf *bf, *bf0;
32251 int i, n;
32252
32253 + pax_track_stack();
32254 +
32255 len += snprintf(buf+len, sizeof(buf)-len,
32256 "available txbuffers: %d\n", sc->txbuf_len);
32257
32258 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32259 --- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
32260 +++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
32261 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32262 int i, im, j;
32263 int nmeasurement;
32264
32265 + pax_track_stack();
32266 +
32267 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32268 if (ah->txchainmask & (1 << i))
32269 num_chains++;
32270 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32271 --- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
32272 +++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
32273 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
32274 int theta_low_bin = 0;
32275 int i;
32276
32277 + pax_track_stack();
32278 +
32279 /* disregard any bin that contains <= 16 samples */
32280 thresh_accum_cnt = 16;
32281 scale_factor = 5;
32282 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c
32283 --- linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
32284 +++ linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
32285 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
32286 char buf[512];
32287 unsigned int len = 0;
32288
32289 + pax_track_stack();
32290 +
32291 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32292 len += snprintf(buf + len, sizeof(buf) - len,
32293 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32294 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
32295 u8 addr[ETH_ALEN];
32296 u32 tmp;
32297
32298 + pax_track_stack();
32299 +
32300 len += snprintf(buf + len, sizeof(buf) - len,
32301 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32302 wiphy_name(sc->hw->wiphy),
32303 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32304 --- linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
32305 +++ linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
32306 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32307 unsigned int len = 0;
32308 int ret = 0;
32309
32310 + pax_track_stack();
32311 +
32312 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32313
32314 ath9k_htc_ps_wakeup(priv);
32315 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32316 unsigned int len = 0;
32317 int ret = 0;
32318
32319 + pax_track_stack();
32320 +
32321 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32322
32323 ath9k_htc_ps_wakeup(priv);
32324 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32325 unsigned int len = 0;
32326 int ret = 0;
32327
32328 + pax_track_stack();
32329 +
32330 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32331
32332 ath9k_htc_ps_wakeup(priv);
32333 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32334 char buf[512];
32335 unsigned int len = 0;
32336
32337 + pax_track_stack();
32338 +
32339 len += snprintf(buf + len, sizeof(buf) - len,
32340 "%20s : %10u\n", "Buffers queued",
32341 priv->debug.tx_stats.buf_queued);
32342 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32343 char buf[512];
32344 unsigned int len = 0;
32345
32346 + pax_track_stack();
32347 +
32348 spin_lock_bh(&priv->tx.tx_lock);
32349
32350 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32351 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32352 char buf[512];
32353 unsigned int len = 0;
32354
32355 + pax_track_stack();
32356 +
32357 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32358 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32359
32360 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h
32361 --- linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
32362 +++ linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
32363 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
32364
32365 /* ANI */
32366 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32367 -};
32368 +} __no_const;
32369
32370 /**
32371 * struct ath_hw_ops - callbacks used by hardware code and driver code
32372 @@ -637,7 +637,7 @@ struct ath_hw_ops {
32373 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32374 struct ath_hw_antcomb_conf *antconf);
32375
32376 -};
32377 +} __no_const;
32378
32379 struct ath_nf_limits {
32380 s16 max;
32381 @@ -650,7 +650,7 @@ struct ath_nf_limits {
32382 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32383
32384 struct ath_hw {
32385 - struct ath_ops reg_ops;
32386 + ath_ops_no_const reg_ops;
32387
32388 struct ieee80211_hw *hw;
32389 struct ath_common common;
32390 diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath.h linux-3.0.7/drivers/net/wireless/ath/ath.h
32391 --- linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
32392 +++ linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
32393 @@ -121,6 +121,7 @@ struct ath_ops {
32394 void (*write_flush) (void *);
32395 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32396 };
32397 +typedef struct ath_ops __no_const ath_ops_no_const;
32398
32399 struct ath_common;
32400 struct ath_bus_ops;
32401 diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c
32402 --- linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
32403 +++ linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
32404 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
32405 int err;
32406 DECLARE_SSID_BUF(ssid);
32407
32408 + pax_track_stack();
32409 +
32410 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32411
32412 if (ssid_len)
32413 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
32414 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32415 int err;
32416
32417 + pax_track_stack();
32418 +
32419 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32420 idx, keylen, len);
32421
32422 diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c
32423 --- linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
32424 +++ linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
32425 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32426 unsigned long flags;
32427 DECLARE_SSID_BUF(ssid);
32428
32429 + pax_track_stack();
32430 +
32431 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32432 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32433 print_ssid(ssid, info_element->data, info_element->len),
32434 diff -urNp linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c
32435 --- linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:54:54.000000000 -0400
32436 +++ linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:55:27.000000000 -0400
32437 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
32438 */
32439 if (iwl3945_mod_params.disable_hw_scan) {
32440 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32441 - iwl3945_hw_ops.hw_scan = NULL;
32442 + pax_open_kernel();
32443 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32444 + pax_close_kernel();
32445 }
32446
32447 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32448 diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32449 --- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
32450 +++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
32451 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
32452 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32453 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32454
32455 + pax_track_stack();
32456 +
32457 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32458
32459 /* Treat uninitialized rate scaling data same as non-existing. */
32460 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
32461 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32462 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32463
32464 + pax_track_stack();
32465 +
32466 /* Override starting rate (index 0) if needed for debug purposes */
32467 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32468
32469 diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32470 --- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
32471 +++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
32472 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
32473 int pos = 0;
32474 const size_t bufsz = sizeof(buf);
32475
32476 + pax_track_stack();
32477 +
32478 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32479 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32480 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32481 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32482 char buf[256 * NUM_IWL_RXON_CTX];
32483 const size_t bufsz = sizeof(buf);
32484
32485 + pax_track_stack();
32486 +
32487 for_each_context(priv, ctx) {
32488 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32489 ctx->ctxid);
32490 diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h
32491 --- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
32492 +++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
32493 @@ -68,8 +68,8 @@ do {
32494 } while (0)
32495
32496 #else
32497 -#define IWL_DEBUG(__priv, level, fmt, args...)
32498 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32499 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32500 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32501 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32502 const void *p, u32 len)
32503 {}
32504 diff -urNp linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c
32505 --- linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
32506 +++ linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
32507 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32508 int buf_len = 512;
32509 size_t len = 0;
32510
32511 + pax_track_stack();
32512 +
32513 if (*ppos != 0)
32514 return 0;
32515 if (count < sizeof(buf))
32516 diff -urNp linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c
32517 --- linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
32518 +++ linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
32519 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
32520 return -EINVAL;
32521
32522 if (fake_hw_scan) {
32523 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32524 - mac80211_hwsim_ops.sw_scan_start = NULL;
32525 - mac80211_hwsim_ops.sw_scan_complete = NULL;
32526 + pax_open_kernel();
32527 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32528 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
32529 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
32530 + pax_close_kernel();
32531 }
32532
32533 spin_lock_init(&hwsim_radio_lock);
32534 diff -urNp linux-3.0.7/drivers/net/wireless/rndis_wlan.c linux-3.0.7/drivers/net/wireless/rndis_wlan.c
32535 --- linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
32536 +++ linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
32537 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
32538
32539 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
32540
32541 - if (rts_threshold < 0 || rts_threshold > 2347)
32542 + if (rts_threshold > 2347)
32543 rts_threshold = 2347;
32544
32545 tmp = cpu_to_le32(rts_threshold);
32546 diff -urNp linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
32547 --- linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
32548 +++ linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
32549 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
32550 u8 rfpath;
32551 u8 num_total_rfpath = rtlphy->num_total_rfpath;
32552
32553 + pax_track_stack();
32554 +
32555 precommoncmdcnt = 0;
32556 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
32557 MAX_PRECMD_CNT,
32558 diff -urNp linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h
32559 --- linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
32560 +++ linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
32561 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
32562 void (*reset)(struct wl1251 *wl);
32563 void (*enable_irq)(struct wl1251 *wl);
32564 void (*disable_irq)(struct wl1251 *wl);
32565 -};
32566 +} __no_const;
32567
32568 struct wl1251 {
32569 struct ieee80211_hw *hw;
32570 diff -urNp linux-3.0.7/drivers/net/wireless/wl12xx/spi.c linux-3.0.7/drivers/net/wireless/wl12xx/spi.c
32571 --- linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
32572 +++ linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
32573 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
32574 u32 chunk_len;
32575 int i;
32576
32577 + pax_track_stack();
32578 +
32579 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
32580
32581 spi_message_init(&m);
32582 diff -urNp linux-3.0.7/drivers/oprofile/buffer_sync.c linux-3.0.7/drivers/oprofile/buffer_sync.c
32583 --- linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
32584 +++ linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
32585 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
32586 if (cookie == NO_COOKIE)
32587 offset = pc;
32588 if (cookie == INVALID_COOKIE) {
32589 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32590 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32591 offset = pc;
32592 }
32593 if (cookie != last_cookie) {
32594 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
32595 /* add userspace sample */
32596
32597 if (!mm) {
32598 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32599 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32600 return 0;
32601 }
32602
32603 cookie = lookup_dcookie(mm, s->eip, &offset);
32604
32605 if (cookie == INVALID_COOKIE) {
32606 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32607 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32608 return 0;
32609 }
32610
32611 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
32612 /* ignore backtraces if failed to add a sample */
32613 if (state == sb_bt_start) {
32614 state = sb_bt_ignore;
32615 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32616 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32617 }
32618 }
32619 release_mm(mm);
32620 diff -urNp linux-3.0.7/drivers/oprofile/event_buffer.c linux-3.0.7/drivers/oprofile/event_buffer.c
32621 --- linux-3.0.7/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
32622 +++ linux-3.0.7/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
32623 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32624 }
32625
32626 if (buffer_pos == buffer_size) {
32627 - atomic_inc(&oprofile_stats.event_lost_overflow);
32628 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32629 return;
32630 }
32631
32632 diff -urNp linux-3.0.7/drivers/oprofile/oprof.c linux-3.0.7/drivers/oprofile/oprof.c
32633 --- linux-3.0.7/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
32634 +++ linux-3.0.7/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
32635 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32636 if (oprofile_ops.switch_events())
32637 return;
32638
32639 - atomic_inc(&oprofile_stats.multiplex_counter);
32640 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32641 start_switch_worker();
32642 }
32643
32644 diff -urNp linux-3.0.7/drivers/oprofile/oprofilefs.c linux-3.0.7/drivers/oprofile/oprofilefs.c
32645 --- linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
32646 +++ linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
32647 @@ -186,7 +186,7 @@ static const struct file_operations atom
32648
32649
32650 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32651 - char const *name, atomic_t *val)
32652 + char const *name, atomic_unchecked_t *val)
32653 {
32654 return __oprofilefs_create_file(sb, root, name,
32655 &atomic_ro_fops, 0444, val);
32656 diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.c linux-3.0.7/drivers/oprofile/oprofile_stats.c
32657 --- linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
32658 +++ linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
32659 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32660 cpu_buf->sample_invalid_eip = 0;
32661 }
32662
32663 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32664 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32665 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32666 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32667 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32668 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32669 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32670 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32671 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32672 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32673 }
32674
32675
32676 diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.h linux-3.0.7/drivers/oprofile/oprofile_stats.h
32677 --- linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
32678 +++ linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
32679 @@ -13,11 +13,11 @@
32680 #include <asm/atomic.h>
32681
32682 struct oprofile_stat_struct {
32683 - atomic_t sample_lost_no_mm;
32684 - atomic_t sample_lost_no_mapping;
32685 - atomic_t bt_lost_no_mapping;
32686 - atomic_t event_lost_overflow;
32687 - atomic_t multiplex_counter;
32688 + atomic_unchecked_t sample_lost_no_mm;
32689 + atomic_unchecked_t sample_lost_no_mapping;
32690 + atomic_unchecked_t bt_lost_no_mapping;
32691 + atomic_unchecked_t event_lost_overflow;
32692 + atomic_unchecked_t multiplex_counter;
32693 };
32694
32695 extern struct oprofile_stat_struct oprofile_stats;
32696 diff -urNp linux-3.0.7/drivers/parport/procfs.c linux-3.0.7/drivers/parport/procfs.c
32697 --- linux-3.0.7/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
32698 +++ linux-3.0.7/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
32699 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32700
32701 *ppos += len;
32702
32703 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32704 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32705 }
32706
32707 #ifdef CONFIG_PARPORT_1284
32708 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32709
32710 *ppos += len;
32711
32712 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32713 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32714 }
32715 #endif /* IEEE1284.3 support. */
32716
32717 diff -urNp linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h
32718 --- linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
32719 +++ linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
32720 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
32721 int (*hardware_test) (struct slot* slot, u32 value);
32722 u8 (*get_power) (struct slot* slot);
32723 int (*set_power) (struct slot* slot, int value);
32724 -};
32725 +} __no_const;
32726
32727 struct cpci_hp_controller {
32728 unsigned int irq;
32729 diff -urNp linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c
32730 --- linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
32731 +++ linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
32732 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32733
32734 void compaq_nvram_init (void __iomem *rom_start)
32735 {
32736 +
32737 +#ifndef CONFIG_PAX_KERNEXEC
32738 if (rom_start) {
32739 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32740 }
32741 +#endif
32742 +
32743 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32744
32745 /* initialize our int15 lock */
32746 diff -urNp linux-3.0.7/drivers/pci/pcie/aspm.c linux-3.0.7/drivers/pci/pcie/aspm.c
32747 --- linux-3.0.7/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
32748 +++ linux-3.0.7/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
32749 @@ -27,9 +27,9 @@
32750 #define MODULE_PARAM_PREFIX "pcie_aspm."
32751
32752 /* Note: those are not register definitions */
32753 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32754 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32755 -#define ASPM_STATE_L1 (4) /* L1 state */
32756 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32757 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32758 +#define ASPM_STATE_L1 (4U) /* L1 state */
32759 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32760 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32761
32762 diff -urNp linux-3.0.7/drivers/pci/probe.c linux-3.0.7/drivers/pci/probe.c
32763 --- linux-3.0.7/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
32764 +++ linux-3.0.7/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
32765 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
32766 u32 l, sz, mask;
32767 u16 orig_cmd;
32768
32769 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
32770 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
32771
32772 if (!dev->mmio_always_on) {
32773 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
32774 diff -urNp linux-3.0.7/drivers/pci/proc.c linux-3.0.7/drivers/pci/proc.c
32775 --- linux-3.0.7/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
32776 +++ linux-3.0.7/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
32777 @@ -476,7 +476,16 @@ static const struct file_operations proc
32778 static int __init pci_proc_init(void)
32779 {
32780 struct pci_dev *dev = NULL;
32781 +
32782 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32783 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32784 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32785 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32786 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32787 +#endif
32788 +#else
32789 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32790 +#endif
32791 proc_create("devices", 0, proc_bus_pci_dir,
32792 &proc_bus_pci_dev_operations);
32793 proc_initialized = 1;
32794 diff -urNp linux-3.0.7/drivers/pci/xen-pcifront.c linux-3.0.7/drivers/pci/xen-pcifront.c
32795 --- linux-3.0.7/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
32796 +++ linux-3.0.7/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
32797 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
32798 struct pcifront_sd *sd = bus->sysdata;
32799 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32800
32801 + pax_track_stack();
32802 +
32803 if (verbose_request)
32804 dev_info(&pdev->xdev->dev,
32805 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
32806 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
32807 struct pcifront_sd *sd = bus->sysdata;
32808 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32809
32810 + pax_track_stack();
32811 +
32812 if (verbose_request)
32813 dev_info(&pdev->xdev->dev,
32814 "write dev=%04x:%02x:%02x.%01x - "
32815 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
32816 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32817 struct msi_desc *entry;
32818
32819 + pax_track_stack();
32820 +
32821 if (nvec > SH_INFO_MAX_VEC) {
32822 dev_err(&dev->dev, "too much vector for pci frontend: %x."
32823 " Increase SH_INFO_MAX_VEC.\n", nvec);
32824 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
32825 struct pcifront_sd *sd = dev->bus->sysdata;
32826 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32827
32828 + pax_track_stack();
32829 +
32830 err = do_pci_op(pdev, &op);
32831
32832 /* What should do for error ? */
32833 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
32834 struct pcifront_sd *sd = dev->bus->sysdata;
32835 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32836
32837 + pax_track_stack();
32838 +
32839 err = do_pci_op(pdev, &op);
32840 if (likely(!err)) {
32841 vector[0] = op.value;
32842 diff -urNp linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c
32843 --- linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
32844 +++ linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
32845 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
32846 return 0;
32847 }
32848
32849 -void static hotkey_mask_warn_incomplete_mask(void)
32850 +static void hotkey_mask_warn_incomplete_mask(void)
32851 {
32852 /* log only what the user can fix... */
32853 const u32 wantedmask = hotkey_driver_mask &
32854 diff -urNp linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c
32855 --- linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
32856 +++ linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
32857 @@ -59,7 +59,7 @@ do { \
32858 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
32859 } while(0)
32860
32861 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
32862 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
32863 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
32864
32865 /*
32866 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
32867
32868 cpu = get_cpu();
32869 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
32870 +
32871 + pax_open_kernel();
32872 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
32873 + pax_close_kernel();
32874
32875 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
32876 spin_lock_irqsave(&pnp_bios_lock, flags);
32877 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
32878 :"memory");
32879 spin_unlock_irqrestore(&pnp_bios_lock, flags);
32880
32881 + pax_open_kernel();
32882 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
32883 + pax_close_kernel();
32884 +
32885 put_cpu();
32886
32887 /* If we get here and this is set then the PnP BIOS faulted on us. */
32888 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
32889 return status;
32890 }
32891
32892 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
32893 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
32894 {
32895 int i;
32896
32897 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
32898 pnp_bios_callpoint.offset = header->fields.pm16offset;
32899 pnp_bios_callpoint.segment = PNP_CS16;
32900
32901 + pax_open_kernel();
32902 +
32903 for_each_possible_cpu(i) {
32904 struct desc_struct *gdt = get_cpu_gdt_table(i);
32905 if (!gdt)
32906 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
32907 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
32908 (unsigned long)__va(header->fields.pm16dseg));
32909 }
32910 +
32911 + pax_close_kernel();
32912 }
32913 diff -urNp linux-3.0.7/drivers/pnp/resource.c linux-3.0.7/drivers/pnp/resource.c
32914 --- linux-3.0.7/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
32915 +++ linux-3.0.7/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
32916 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
32917 return 1;
32918
32919 /* check if the resource is valid */
32920 - if (*irq < 0 || *irq > 15)
32921 + if (*irq > 15)
32922 return 0;
32923
32924 /* check if the resource is reserved */
32925 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
32926 return 1;
32927
32928 /* check if the resource is valid */
32929 - if (*dma < 0 || *dma == 4 || *dma > 7)
32930 + if (*dma == 4 || *dma > 7)
32931 return 0;
32932
32933 /* check if the resource is reserved */
32934 diff -urNp linux-3.0.7/drivers/power/bq27x00_battery.c linux-3.0.7/drivers/power/bq27x00_battery.c
32935 --- linux-3.0.7/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
32936 +++ linux-3.0.7/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
32937 @@ -67,7 +67,7 @@
32938 struct bq27x00_device_info;
32939 struct bq27x00_access_methods {
32940 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
32941 -};
32942 +} __no_const;
32943
32944 enum bq27x00_chip { BQ27000, BQ27500 };
32945
32946 diff -urNp linux-3.0.7/drivers/regulator/max8660.c linux-3.0.7/drivers/regulator/max8660.c
32947 --- linux-3.0.7/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
32948 +++ linux-3.0.7/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
32949 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
32950 max8660->shadow_regs[MAX8660_OVER1] = 5;
32951 } else {
32952 /* Otherwise devices can be toggled via software */
32953 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
32954 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
32955 + pax_open_kernel();
32956 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
32957 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
32958 + pax_close_kernel();
32959 }
32960
32961 /*
32962 diff -urNp linux-3.0.7/drivers/regulator/mc13892-regulator.c linux-3.0.7/drivers/regulator/mc13892-regulator.c
32963 --- linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
32964 +++ linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
32965 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
32966 }
32967 mc13xxx_unlock(mc13892);
32968
32969 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32970 + pax_open_kernel();
32971 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32972 = mc13892_vcam_set_mode;
32973 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32974 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32975 = mc13892_vcam_get_mode;
32976 + pax_close_kernel();
32977 for (i = 0; i < pdata->num_regulators; i++) {
32978 init_data = &pdata->regulators[i];
32979 priv->regulators[i] = regulator_register(
32980 diff -urNp linux-3.0.7/drivers/rtc/rtc-dev.c linux-3.0.7/drivers/rtc/rtc-dev.c
32981 --- linux-3.0.7/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
32982 +++ linux-3.0.7/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
32983 @@ -14,6 +14,7 @@
32984 #include <linux/module.h>
32985 #include <linux/rtc.h>
32986 #include <linux/sched.h>
32987 +#include <linux/grsecurity.h>
32988 #include "rtc-core.h"
32989
32990 static dev_t rtc_devt;
32991 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
32992 if (copy_from_user(&tm, uarg, sizeof(tm)))
32993 return -EFAULT;
32994
32995 + gr_log_timechange();
32996 +
32997 return rtc_set_time(rtc, &tm);
32998
32999 case RTC_PIE_ON:
33000 diff -urNp linux-3.0.7/drivers/scsi/aacraid/aacraid.h linux-3.0.7/drivers/scsi/aacraid/aacraid.h
33001 --- linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
33002 +++ linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
33003 @@ -492,7 +492,7 @@ struct adapter_ops
33004 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33005 /* Administrative operations */
33006 int (*adapter_comm)(struct aac_dev * dev, int comm);
33007 -};
33008 +} __no_const;
33009
33010 /*
33011 * Define which interrupt handler needs to be installed
33012 diff -urNp linux-3.0.7/drivers/scsi/aacraid/commctrl.c linux-3.0.7/drivers/scsi/aacraid/commctrl.c
33013 --- linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
33014 +++ linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
33015 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33016 u32 actual_fibsize64, actual_fibsize = 0;
33017 int i;
33018
33019 + pax_track_stack();
33020
33021 if (dev->in_reset) {
33022 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33023 diff -urNp linux-3.0.7/drivers/scsi/aacraid/linit.c linux-3.0.7/drivers/scsi/aacraid/linit.c
33024 --- linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-07-21 22:17:23.000000000 -0400
33025 +++ linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-10-11 10:44:33.000000000 -0400
33026 @@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33027 #elif defined(__devinitconst)
33028 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33029 #else
33030 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33031 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33032 #endif
33033 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33034 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33035 diff -urNp linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c
33036 --- linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-07-21 22:17:23.000000000 -0400
33037 +++ linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-10-11 10:44:33.000000000 -0400
33038 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33039 .lldd_control_phy = asd_control_phy,
33040 };
33041
33042 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33043 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33044 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33045 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33046 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33047 diff -urNp linux-3.0.7/drivers/scsi/bfa/bfad.c linux-3.0.7/drivers/scsi/bfa/bfad.c
33048 --- linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
33049 +++ linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
33050 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33051 struct bfad_vport_s *vport, *vport_new;
33052 struct bfa_fcs_driver_info_s driver_info;
33053
33054 + pax_track_stack();
33055 +
33056 /* Fill the driver_info info to fcs*/
33057 memset(&driver_info, 0, sizeof(driver_info));
33058 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
33059 diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c
33060 --- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
33061 +++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
33062 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33063 u16 len, count;
33064 u16 templen;
33065
33066 + pax_track_stack();
33067 +
33068 /*
33069 * get hba attributes
33070 */
33071 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33072 u8 count = 0;
33073 u16 templen;
33074
33075 + pax_track_stack();
33076 +
33077 /*
33078 * get port attributes
33079 */
33080 diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c
33081 --- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
33082 +++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
33083 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33084 struct fc_rpsc_speed_info_s speeds;
33085 struct bfa_port_attr_s pport_attr;
33086
33087 + pax_track_stack();
33088 +
33089 bfa_trc(port->fcs, rx_fchs->s_id);
33090 bfa_trc(port->fcs, rx_fchs->d_id);
33091
33092 diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa.h linux-3.0.7/drivers/scsi/bfa/bfa.h
33093 --- linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
33094 +++ linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
33095 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
33096 u32 *nvecs, u32 *maxvec);
33097 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
33098 u32 *end);
33099 -};
33100 +} __no_const;
33101 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33102
33103 struct bfa_iocfc_s {
33104 diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h
33105 --- linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
33106 +++ linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
33107 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
33108 bfa_ioc_disable_cbfn_t disable_cbfn;
33109 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33110 bfa_ioc_reset_cbfn_t reset_cbfn;
33111 -};
33112 +} __no_const;
33113
33114 /*
33115 * Heartbeat failure notification queue element.
33116 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
33117 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
33118 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33119 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33120 -};
33121 +} __no_const;
33122
33123 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
33124 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
33125 diff -urNp linux-3.0.7/drivers/scsi/BusLogic.c linux-3.0.7/drivers/scsi/BusLogic.c
33126 --- linux-3.0.7/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
33127 +++ linux-3.0.7/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
33128 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33129 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33130 *PrototypeHostAdapter)
33131 {
33132 + pax_track_stack();
33133 +
33134 /*
33135 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33136 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33137 diff -urNp linux-3.0.7/drivers/scsi/dpt_i2o.c linux-3.0.7/drivers/scsi/dpt_i2o.c
33138 --- linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
33139 +++ linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
33140 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33141 dma_addr_t addr;
33142 ulong flags = 0;
33143
33144 + pax_track_stack();
33145 +
33146 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33147 // get user msg size in u32s
33148 if(get_user(size, &user_msg[0])){
33149 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33150 s32 rcode;
33151 dma_addr_t addr;
33152
33153 + pax_track_stack();
33154 +
33155 memset(msg, 0 , sizeof(msg));
33156 len = scsi_bufflen(cmd);
33157 direction = 0x00000000;
33158 diff -urNp linux-3.0.7/drivers/scsi/eata.c linux-3.0.7/drivers/scsi/eata.c
33159 --- linux-3.0.7/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
33160 +++ linux-3.0.7/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
33161 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33162 struct hostdata *ha;
33163 char name[16];
33164
33165 + pax_track_stack();
33166 +
33167 sprintf(name, "%s%d", driver_name, j);
33168
33169 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33170 diff -urNp linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c
33171 --- linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
33172 +++ linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
33173 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33174 } buf;
33175 int rc;
33176
33177 + pax_track_stack();
33178 +
33179 fiph = (struct fip_header *)skb->data;
33180 sub = fiph->fip_subcode;
33181
33182 diff -urNp linux-3.0.7/drivers/scsi/gdth.c linux-3.0.7/drivers/scsi/gdth.c
33183 --- linux-3.0.7/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
33184 +++ linux-3.0.7/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
33185 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33186 unsigned long flags;
33187 gdth_ha_str *ha;
33188
33189 + pax_track_stack();
33190 +
33191 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33192 return -EFAULT;
33193 ha = gdth_find_ha(ldrv.ionode);
33194 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33195 gdth_ha_str *ha;
33196 int rval;
33197
33198 + pax_track_stack();
33199 +
33200 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33201 res.number >= MAX_HDRIVES)
33202 return -EFAULT;
33203 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33204 gdth_ha_str *ha;
33205 int rval;
33206
33207 + pax_track_stack();
33208 +
33209 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33210 return -EFAULT;
33211 ha = gdth_find_ha(gen.ionode);
33212 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33213 int i;
33214 gdth_cmd_str gdtcmd;
33215 char cmnd[MAX_COMMAND_SIZE];
33216 +
33217 + pax_track_stack();
33218 +
33219 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33220
33221 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33222 diff -urNp linux-3.0.7/drivers/scsi/gdth_proc.c linux-3.0.7/drivers/scsi/gdth_proc.c
33223 --- linux-3.0.7/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
33224 +++ linux-3.0.7/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
33225 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33226 u64 paddr;
33227
33228 char cmnd[MAX_COMMAND_SIZE];
33229 +
33230 + pax_track_stack();
33231 +
33232 memset(cmnd, 0xff, 12);
33233 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33234
33235 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33236 gdth_hget_str *phg;
33237 char cmnd[MAX_COMMAND_SIZE];
33238
33239 + pax_track_stack();
33240 +
33241 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33242 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33243 if (!gdtcmd || !estr)
33244 diff -urNp linux-3.0.7/drivers/scsi/hosts.c linux-3.0.7/drivers/scsi/hosts.c
33245 --- linux-3.0.7/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
33246 +++ linux-3.0.7/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
33247 @@ -42,7 +42,7 @@
33248 #include "scsi_logging.h"
33249
33250
33251 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33252 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33253
33254
33255 static void scsi_host_cls_release(struct device *dev)
33256 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33257 * subtract one because we increment first then return, but we need to
33258 * know what the next host number was before increment
33259 */
33260 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33261 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33262 shost->dma_channel = 0xff;
33263
33264 /* These three are default values which can be overridden */
33265 diff -urNp linux-3.0.7/drivers/scsi/hpsa.c linux-3.0.7/drivers/scsi/hpsa.c
33266 --- linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:54:54.000000000 -0400
33267 +++ linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:55:27.000000000 -0400
33268 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33269 u32 a;
33270
33271 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33272 - return h->access.command_completed(h);
33273 + return h->access->command_completed(h);
33274
33275 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33276 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33277 @@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33278 while (!list_empty(&h->reqQ)) {
33279 c = list_entry(h->reqQ.next, struct CommandList, list);
33280 /* can't do anything if fifo is full */
33281 - if ((h->access.fifo_full(h))) {
33282 + if ((h->access->fifo_full(h))) {
33283 dev_warn(&h->pdev->dev, "fifo full\n");
33284 break;
33285 }
33286 @@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33287 h->Qdepth--;
33288
33289 /* Tell the controller execute command */
33290 - h->access.submit_command(h, c);
33291 + h->access->submit_command(h, c);
33292
33293 /* Put job onto the completed Q */
33294 addQ(&h->cmpQ, c);
33295 @@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33296
33297 static inline unsigned long get_next_completion(struct ctlr_info *h)
33298 {
33299 - return h->access.command_completed(h);
33300 + return h->access->command_completed(h);
33301 }
33302
33303 static inline bool interrupt_pending(struct ctlr_info *h)
33304 {
33305 - return h->access.intr_pending(h);
33306 + return h->access->intr_pending(h);
33307 }
33308
33309 static inline long interrupt_not_for_us(struct ctlr_info *h)
33310 {
33311 - return (h->access.intr_pending(h) == 0) ||
33312 + return (h->access->intr_pending(h) == 0) ||
33313 (h->interrupts_enabled == 0);
33314 }
33315
33316 @@ -3874,7 +3874,7 @@ static int __devinit hpsa_pci_init(struc
33317 if (prod_index < 0)
33318 return -ENODEV;
33319 h->product_name = products[prod_index].product_name;
33320 - h->access = *(products[prod_index].access);
33321 + h->access = products[prod_index].access;
33322
33323 if (hpsa_board_disabled(h->pdev)) {
33324 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33325 @@ -4151,7 +4151,7 @@ reinit_after_soft_reset:
33326 }
33327
33328 /* make sure the board interrupts are off */
33329 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33330 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33331
33332 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33333 goto clean2;
33334 @@ -4185,7 +4185,7 @@ reinit_after_soft_reset:
33335 * fake ones to scoop up any residual completions.
33336 */
33337 spin_lock_irqsave(&h->lock, flags);
33338 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33339 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33340 spin_unlock_irqrestore(&h->lock, flags);
33341 free_irq(h->intr[h->intr_mode], h);
33342 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33343 @@ -4204,9 +4204,9 @@ reinit_after_soft_reset:
33344 dev_info(&h->pdev->dev, "Board READY.\n");
33345 dev_info(&h->pdev->dev,
33346 "Waiting for stale completions to drain.\n");
33347 - h->access.set_intr_mask(h, HPSA_INTR_ON);
33348 + h->access->set_intr_mask(h, HPSA_INTR_ON);
33349 msleep(10000);
33350 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33351 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33352
33353 rc = controller_reset_failed(h->cfgtable);
33354 if (rc)
33355 @@ -4227,7 +4227,7 @@ reinit_after_soft_reset:
33356 }
33357
33358 /* Turn the interrupts on so we can service requests */
33359 - h->access.set_intr_mask(h, HPSA_INTR_ON);
33360 + h->access->set_intr_mask(h, HPSA_INTR_ON);
33361
33362 hpsa_hba_inquiry(h);
33363 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33364 @@ -4280,7 +4280,7 @@ static void hpsa_shutdown(struct pci_dev
33365 * To write all data in the battery backed cache to disks
33366 */
33367 hpsa_flush_cache(h);
33368 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33369 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33370 free_irq(h->intr[h->intr_mode], h);
33371 #ifdef CONFIG_PCI_MSI
33372 if (h->msix_vector)
33373 @@ -4443,7 +4443,7 @@ static __devinit void hpsa_enter_perform
33374 return;
33375 }
33376 /* Change the access methods to the performant access methods */
33377 - h->access = SA5_performant_access;
33378 + h->access = &SA5_performant_access;
33379 h->transMethod = CFGTBL_Trans_Performant;
33380 }
33381
33382 diff -urNp linux-3.0.7/drivers/scsi/hpsa.h linux-3.0.7/drivers/scsi/hpsa.h
33383 --- linux-3.0.7/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
33384 +++ linux-3.0.7/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
33385 @@ -73,7 +73,7 @@ struct ctlr_info {
33386 unsigned int msix_vector;
33387 unsigned int msi_vector;
33388 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33389 - struct access_method access;
33390 + struct access_method *access;
33391
33392 /* queue and queue Info */
33393 struct list_head reqQ;
33394 diff -urNp linux-3.0.7/drivers/scsi/ips.h linux-3.0.7/drivers/scsi/ips.h
33395 --- linux-3.0.7/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
33396 +++ linux-3.0.7/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
33397 @@ -1027,7 +1027,7 @@ typedef struct {
33398 int (*intr)(struct ips_ha *);
33399 void (*enableint)(struct ips_ha *);
33400 uint32_t (*statupd)(struct ips_ha *);
33401 -} ips_hw_func_t;
33402 +} __no_const ips_hw_func_t;
33403
33404 typedef struct ips_ha {
33405 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33406 diff -urNp linux-3.0.7/drivers/scsi/libfc/fc_exch.c linux-3.0.7/drivers/scsi/libfc/fc_exch.c
33407 --- linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
33408 +++ linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
33409 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
33410 * all together if not used XXX
33411 */
33412 struct {
33413 - atomic_t no_free_exch;
33414 - atomic_t no_free_exch_xid;
33415 - atomic_t xid_not_found;
33416 - atomic_t xid_busy;
33417 - atomic_t seq_not_found;
33418 - atomic_t non_bls_resp;
33419 + atomic_unchecked_t no_free_exch;
33420 + atomic_unchecked_t no_free_exch_xid;
33421 + atomic_unchecked_t xid_not_found;
33422 + atomic_unchecked_t xid_busy;
33423 + atomic_unchecked_t seq_not_found;
33424 + atomic_unchecked_t non_bls_resp;
33425 } stats;
33426 };
33427
33428 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
33429 /* allocate memory for exchange */
33430 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33431 if (!ep) {
33432 - atomic_inc(&mp->stats.no_free_exch);
33433 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33434 goto out;
33435 }
33436 memset(ep, 0, sizeof(*ep));
33437 @@ -761,7 +761,7 @@ out:
33438 return ep;
33439 err:
33440 spin_unlock_bh(&pool->lock);
33441 - atomic_inc(&mp->stats.no_free_exch_xid);
33442 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33443 mempool_free(ep, mp->ep_pool);
33444 return NULL;
33445 }
33446 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33447 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33448 ep = fc_exch_find(mp, xid);
33449 if (!ep) {
33450 - atomic_inc(&mp->stats.xid_not_found);
33451 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33452 reject = FC_RJT_OX_ID;
33453 goto out;
33454 }
33455 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33456 ep = fc_exch_find(mp, xid);
33457 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33458 if (ep) {
33459 - atomic_inc(&mp->stats.xid_busy);
33460 + atomic_inc_unchecked(&mp->stats.xid_busy);
33461 reject = FC_RJT_RX_ID;
33462 goto rel;
33463 }
33464 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33465 }
33466 xid = ep->xid; /* get our XID */
33467 } else if (!ep) {
33468 - atomic_inc(&mp->stats.xid_not_found);
33469 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33470 reject = FC_RJT_RX_ID; /* XID not found */
33471 goto out;
33472 }
33473 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33474 } else {
33475 sp = &ep->seq;
33476 if (sp->id != fh->fh_seq_id) {
33477 - atomic_inc(&mp->stats.seq_not_found);
33478 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33479 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33480 goto rel;
33481 }
33482 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
33483
33484 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33485 if (!ep) {
33486 - atomic_inc(&mp->stats.xid_not_found);
33487 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33488 goto out;
33489 }
33490 if (ep->esb_stat & ESB_ST_COMPLETE) {
33491 - atomic_inc(&mp->stats.xid_not_found);
33492 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33493 goto rel;
33494 }
33495 if (ep->rxid == FC_XID_UNKNOWN)
33496 ep->rxid = ntohs(fh->fh_rx_id);
33497 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33498 - atomic_inc(&mp->stats.xid_not_found);
33499 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33500 goto rel;
33501 }
33502 if (ep->did != ntoh24(fh->fh_s_id) &&
33503 ep->did != FC_FID_FLOGI) {
33504 - atomic_inc(&mp->stats.xid_not_found);
33505 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33506 goto rel;
33507 }
33508 sof = fr_sof(fp);
33509 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
33510 sp->ssb_stat |= SSB_ST_RESP;
33511 sp->id = fh->fh_seq_id;
33512 } else if (sp->id != fh->fh_seq_id) {
33513 - atomic_inc(&mp->stats.seq_not_found);
33514 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33515 goto rel;
33516 }
33517
33518 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
33519 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33520
33521 if (!sp)
33522 - atomic_inc(&mp->stats.xid_not_found);
33523 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33524 else
33525 - atomic_inc(&mp->stats.non_bls_resp);
33526 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33527
33528 fc_frame_free(fp);
33529 }
33530 diff -urNp linux-3.0.7/drivers/scsi/libsas/sas_ata.c linux-3.0.7/drivers/scsi/libsas/sas_ata.c
33531 --- linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
33532 +++ linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
33533 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
33534 .postreset = ata_std_postreset,
33535 .error_handler = ata_std_error_handler,
33536 .post_internal_cmd = sas_ata_post_internal,
33537 - .qc_defer = ata_std_qc_defer,
33538 + .qc_defer = ata_std_qc_defer,
33539 .qc_prep = ata_noop_qc_prep,
33540 .qc_issue = sas_ata_qc_issue,
33541 .qc_fill_rtf = sas_ata_qc_fill_rtf,
33542 diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c
33543 --- linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
33544 +++ linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
33545 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
33546
33547 #include <linux/debugfs.h>
33548
33549 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33550 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33551 static unsigned long lpfc_debugfs_start_time = 0L;
33552
33553 /* iDiag */
33554 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33555 lpfc_debugfs_enable = 0;
33556
33557 len = 0;
33558 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33559 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33560 (lpfc_debugfs_max_disc_trc - 1);
33561 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33562 dtp = vport->disc_trc + i;
33563 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33564 lpfc_debugfs_enable = 0;
33565
33566 len = 0;
33567 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33568 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33569 (lpfc_debugfs_max_slow_ring_trc - 1);
33570 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33571 dtp = phba->slow_ring_trc + i;
33572 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33573 uint32_t *ptr;
33574 char buffer[1024];
33575
33576 + pax_track_stack();
33577 +
33578 off = 0;
33579 spin_lock_irq(&phba->hbalock);
33580
33581 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33582 !vport || !vport->disc_trc)
33583 return;
33584
33585 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33586 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33587 (lpfc_debugfs_max_disc_trc - 1);
33588 dtp = vport->disc_trc + index;
33589 dtp->fmt = fmt;
33590 dtp->data1 = data1;
33591 dtp->data2 = data2;
33592 dtp->data3 = data3;
33593 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33594 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33595 dtp->jif = jiffies;
33596 #endif
33597 return;
33598 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33599 !phba || !phba->slow_ring_trc)
33600 return;
33601
33602 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33603 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33604 (lpfc_debugfs_max_slow_ring_trc - 1);
33605 dtp = phba->slow_ring_trc + index;
33606 dtp->fmt = fmt;
33607 dtp->data1 = data1;
33608 dtp->data2 = data2;
33609 dtp->data3 = data3;
33610 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33611 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33612 dtp->jif = jiffies;
33613 #endif
33614 return;
33615 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33616 "slow_ring buffer\n");
33617 goto debug_failed;
33618 }
33619 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33620 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33621 memset(phba->slow_ring_trc, 0,
33622 (sizeof(struct lpfc_debugfs_trc) *
33623 lpfc_debugfs_max_slow_ring_trc));
33624 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33625 "buffer\n");
33626 goto debug_failed;
33627 }
33628 - atomic_set(&vport->disc_trc_cnt, 0);
33629 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33630
33631 snprintf(name, sizeof(name), "discovery_trace");
33632 vport->debug_disc_trc =
33633 diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc.h linux-3.0.7/drivers/scsi/lpfc/lpfc.h
33634 --- linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:54:54.000000000 -0400
33635 +++ linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:55:27.000000000 -0400
33636 @@ -425,7 +425,7 @@ struct lpfc_vport {
33637 struct dentry *debug_nodelist;
33638 struct dentry *vport_debugfs_root;
33639 struct lpfc_debugfs_trc *disc_trc;
33640 - atomic_t disc_trc_cnt;
33641 + atomic_unchecked_t disc_trc_cnt;
33642 #endif
33643 uint8_t stat_data_enabled;
33644 uint8_t stat_data_blocked;
33645 @@ -832,8 +832,8 @@ struct lpfc_hba {
33646 struct timer_list fabric_block_timer;
33647 unsigned long bit_flags;
33648 #define FABRIC_COMANDS_BLOCKED 0
33649 - atomic_t num_rsrc_err;
33650 - atomic_t num_cmd_success;
33651 + atomic_unchecked_t num_rsrc_err;
33652 + atomic_unchecked_t num_cmd_success;
33653 unsigned long last_rsrc_error_time;
33654 unsigned long last_ramp_down_time;
33655 unsigned long last_ramp_up_time;
33656 @@ -847,7 +847,7 @@ struct lpfc_hba {
33657 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33658 struct dentry *debug_slow_ring_trc;
33659 struct lpfc_debugfs_trc *slow_ring_trc;
33660 - atomic_t slow_ring_trc_cnt;
33661 + atomic_unchecked_t slow_ring_trc_cnt;
33662 /* iDiag debugfs sub-directory */
33663 struct dentry *idiag_root;
33664 struct dentry *idiag_pci_cfg;
33665 diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c
33666 --- linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:54:54.000000000 -0400
33667 +++ linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:55:27.000000000 -0400
33668 @@ -9971,8 +9971,10 @@ lpfc_init(void)
33669 printk(LPFC_COPYRIGHT "\n");
33670
33671 if (lpfc_enable_npiv) {
33672 - lpfc_transport_functions.vport_create = lpfc_vport_create;
33673 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33674 + pax_open_kernel();
33675 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
33676 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33677 + pax_close_kernel();
33678 }
33679 lpfc_transport_template =
33680 fc_attach_transport(&lpfc_transport_functions);
33681 diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c
33682 --- linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:54:54.000000000 -0400
33683 +++ linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:55:27.000000000 -0400
33684 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33685 uint32_t evt_posted;
33686
33687 spin_lock_irqsave(&phba->hbalock, flags);
33688 - atomic_inc(&phba->num_rsrc_err);
33689 + atomic_inc_unchecked(&phba->num_rsrc_err);
33690 phba->last_rsrc_error_time = jiffies;
33691
33692 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33693 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33694 unsigned long flags;
33695 struct lpfc_hba *phba = vport->phba;
33696 uint32_t evt_posted;
33697 - atomic_inc(&phba->num_cmd_success);
33698 + atomic_inc_unchecked(&phba->num_cmd_success);
33699
33700 if (vport->cfg_lun_queue_depth <= queue_depth)
33701 return;
33702 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33703 unsigned long num_rsrc_err, num_cmd_success;
33704 int i;
33705
33706 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33707 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33708 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33709 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33710
33711 vports = lpfc_create_vport_work_array(phba);
33712 if (vports != NULL)
33713 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33714 }
33715 }
33716 lpfc_destroy_vport_work_array(phba, vports);
33717 - atomic_set(&phba->num_rsrc_err, 0);
33718 - atomic_set(&phba->num_cmd_success, 0);
33719 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33720 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33721 }
33722
33723 /**
33724 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33725 }
33726 }
33727 lpfc_destroy_vport_work_array(phba, vports);
33728 - atomic_set(&phba->num_rsrc_err, 0);
33729 - atomic_set(&phba->num_cmd_success, 0);
33730 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33731 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33732 }
33733
33734 /**
33735 diff -urNp linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c
33736 --- linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
33737 +++ linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
33738 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33739 int rval;
33740 int i;
33741
33742 + pax_track_stack();
33743 +
33744 // Allocate memory for the base list of scb for management module.
33745 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33746
33747 diff -urNp linux-3.0.7/drivers/scsi/osd/osd_initiator.c linux-3.0.7/drivers/scsi/osd/osd_initiator.c
33748 --- linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
33749 +++ linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
33750 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
33751 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33752 int ret;
33753
33754 + pax_track_stack();
33755 +
33756 or = osd_start_request(od, GFP_KERNEL);
33757 if (!or)
33758 return -ENOMEM;
33759 diff -urNp linux-3.0.7/drivers/scsi/pmcraid.c linux-3.0.7/drivers/scsi/pmcraid.c
33760 --- linux-3.0.7/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
33761 +++ linux-3.0.7/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
33762 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
33763 res->scsi_dev = scsi_dev;
33764 scsi_dev->hostdata = res;
33765 res->change_detected = 0;
33766 - atomic_set(&res->read_failures, 0);
33767 - atomic_set(&res->write_failures, 0);
33768 + atomic_set_unchecked(&res->read_failures, 0);
33769 + atomic_set_unchecked(&res->write_failures, 0);
33770 rc = 0;
33771 }
33772 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33773 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
33774
33775 /* If this was a SCSI read/write command keep count of errors */
33776 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33777 - atomic_inc(&res->read_failures);
33778 + atomic_inc_unchecked(&res->read_failures);
33779 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33780 - atomic_inc(&res->write_failures);
33781 + atomic_inc_unchecked(&res->write_failures);
33782
33783 if (!RES_IS_GSCSI(res->cfg_entry) &&
33784 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33785 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
33786 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33787 * hrrq_id assigned here in queuecommand
33788 */
33789 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33790 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33791 pinstance->num_hrrq;
33792 cmd->cmd_done = pmcraid_io_done;
33793
33794 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
33795 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33796 * hrrq_id assigned here in queuecommand
33797 */
33798 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33799 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33800 pinstance->num_hrrq;
33801
33802 if (request_size) {
33803 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
33804
33805 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33806 /* add resources only after host is added into system */
33807 - if (!atomic_read(&pinstance->expose_resources))
33808 + if (!atomic_read_unchecked(&pinstance->expose_resources))
33809 return;
33810
33811 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
33812 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
33813 init_waitqueue_head(&pinstance->reset_wait_q);
33814
33815 atomic_set(&pinstance->outstanding_cmds, 0);
33816 - atomic_set(&pinstance->last_message_id, 0);
33817 - atomic_set(&pinstance->expose_resources, 0);
33818 + atomic_set_unchecked(&pinstance->last_message_id, 0);
33819 + atomic_set_unchecked(&pinstance->expose_resources, 0);
33820
33821 INIT_LIST_HEAD(&pinstance->free_res_q);
33822 INIT_LIST_HEAD(&pinstance->used_res_q);
33823 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
33824 /* Schedule worker thread to handle CCN and take care of adding and
33825 * removing devices to OS
33826 */
33827 - atomic_set(&pinstance->expose_resources, 1);
33828 + atomic_set_unchecked(&pinstance->expose_resources, 1);
33829 schedule_work(&pinstance->worker_q);
33830 return rc;
33831
33832 diff -urNp linux-3.0.7/drivers/scsi/pmcraid.h linux-3.0.7/drivers/scsi/pmcraid.h
33833 --- linux-3.0.7/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
33834 +++ linux-3.0.7/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
33835 @@ -749,7 +749,7 @@ struct pmcraid_instance {
33836 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
33837
33838 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
33839 - atomic_t last_message_id;
33840 + atomic_unchecked_t last_message_id;
33841
33842 /* configuration table */
33843 struct pmcraid_config_table *cfg_table;
33844 @@ -778,7 +778,7 @@ struct pmcraid_instance {
33845 atomic_t outstanding_cmds;
33846
33847 /* should add/delete resources to mid-layer now ?*/
33848 - atomic_t expose_resources;
33849 + atomic_unchecked_t expose_resources;
33850
33851
33852
33853 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
33854 struct pmcraid_config_table_entry_ext cfg_entry_ext;
33855 };
33856 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33857 - atomic_t read_failures; /* count of failed READ commands */
33858 - atomic_t write_failures; /* count of failed WRITE commands */
33859 + atomic_unchecked_t read_failures; /* count of failed READ commands */
33860 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33861
33862 /* To indicate add/delete/modify during CCN */
33863 u8 change_detected;
33864 diff -urNp linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h
33865 --- linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
33866 +++ linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
33867 @@ -2244,7 +2244,7 @@ struct isp_operations {
33868 int (*get_flash_version) (struct scsi_qla_host *, void *);
33869 int (*start_scsi) (srb_t *);
33870 int (*abort_isp) (struct scsi_qla_host *);
33871 -};
33872 +} __no_const;
33873
33874 /* MSI-X Support *************************************************************/
33875
33876 diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h
33877 --- linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
33878 +++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
33879 @@ -256,7 +256,7 @@ struct ddb_entry {
33880 atomic_t retry_relogin_timer; /* Min Time between relogins
33881 * (4000 only) */
33882 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33883 - atomic_t relogin_retry_count; /* Num of times relogin has been
33884 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33885 * retried */
33886
33887 uint16_t port;
33888 diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c
33889 --- linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
33890 +++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
33891 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
33892 ddb_entry->fw_ddb_index = fw_ddb_index;
33893 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33894 atomic_set(&ddb_entry->relogin_timer, 0);
33895 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33896 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33897 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33898 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33899 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33900 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
33901 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
33902 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
33903 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33904 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33905 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33906 atomic_set(&ddb_entry->relogin_timer, 0);
33907 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33908 iscsi_unblock_session(ddb_entry->sess);
33909 diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c
33910 --- linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
33911 +++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
33912 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
33913 ddb_entry->fw_ddb_device_state ==
33914 DDB_DS_SESSION_FAILED) {
33915 /* Reset retry relogin timer */
33916 - atomic_inc(&ddb_entry->relogin_retry_count);
33917 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33918 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
33919 " timed out-retrying"
33920 " relogin (%d)\n",
33921 ha->host_no,
33922 ddb_entry->fw_ddb_index,
33923 - atomic_read(&ddb_entry->
33924 + atomic_read_unchecked(&ddb_entry->
33925 relogin_retry_count))
33926 );
33927 start_dpc++;
33928 diff -urNp linux-3.0.7/drivers/scsi/scsi.c linux-3.0.7/drivers/scsi/scsi.c
33929 --- linux-3.0.7/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
33930 +++ linux-3.0.7/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
33931 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33932 unsigned long timeout;
33933 int rtn = 0;
33934
33935 - atomic_inc(&cmd->device->iorequest_cnt);
33936 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33937
33938 /* check if the device is still usable */
33939 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33940 diff -urNp linux-3.0.7/drivers/scsi/scsi_debug.c linux-3.0.7/drivers/scsi/scsi_debug.c
33941 --- linux-3.0.7/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
33942 +++ linux-3.0.7/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
33943 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
33944 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33945 unsigned char *cmd = (unsigned char *)scp->cmnd;
33946
33947 + pax_track_stack();
33948 +
33949 if ((errsts = check_readiness(scp, 1, devip)))
33950 return errsts;
33951 memset(arr, 0, sizeof(arr));
33952 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
33953 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33954 unsigned char *cmd = (unsigned char *)scp->cmnd;
33955
33956 + pax_track_stack();
33957 +
33958 if ((errsts = check_readiness(scp, 1, devip)))
33959 return errsts;
33960 memset(arr, 0, sizeof(arr));
33961 diff -urNp linux-3.0.7/drivers/scsi/scsi_lib.c linux-3.0.7/drivers/scsi/scsi_lib.c
33962 --- linux-3.0.7/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
33963 +++ linux-3.0.7/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
33964 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
33965 shost = sdev->host;
33966 scsi_init_cmd_errh(cmd);
33967 cmd->result = DID_NO_CONNECT << 16;
33968 - atomic_inc(&cmd->device->iorequest_cnt);
33969 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33970
33971 /*
33972 * SCSI request completion path will do scsi_device_unbusy(),
33973 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
33974
33975 INIT_LIST_HEAD(&cmd->eh_entry);
33976
33977 - atomic_inc(&cmd->device->iodone_cnt);
33978 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
33979 if (cmd->result)
33980 - atomic_inc(&cmd->device->ioerr_cnt);
33981 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
33982
33983 disposition = scsi_decide_disposition(cmd);
33984 if (disposition != SUCCESS &&
33985 diff -urNp linux-3.0.7/drivers/scsi/scsi_sysfs.c linux-3.0.7/drivers/scsi/scsi_sysfs.c
33986 --- linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
33987 +++ linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
33988 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
33989 char *buf) \
33990 { \
33991 struct scsi_device *sdev = to_scsi_device(dev); \
33992 - unsigned long long count = atomic_read(&sdev->field); \
33993 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
33994 return snprintf(buf, 20, "0x%llx\n", count); \
33995 } \
33996 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
33997 diff -urNp linux-3.0.7/drivers/scsi/scsi_tgt_lib.c linux-3.0.7/drivers/scsi/scsi_tgt_lib.c
33998 --- linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
33999 +++ linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
34000 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34001 int err;
34002
34003 dprintk("%lx %u\n", uaddr, len);
34004 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34005 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34006 if (err) {
34007 /*
34008 * TODO: need to fixup sg_tablesize, max_segment_size,
34009 diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_fc.c linux-3.0.7/drivers/scsi/scsi_transport_fc.c
34010 --- linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
34011 +++ linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
34012 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34013 * Netlink Infrastructure
34014 */
34015
34016 -static atomic_t fc_event_seq;
34017 +static atomic_unchecked_t fc_event_seq;
34018
34019 /**
34020 * fc_get_event_number - Obtain the next sequential FC event number
34021 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34022 u32
34023 fc_get_event_number(void)
34024 {
34025 - return atomic_add_return(1, &fc_event_seq);
34026 + return atomic_add_return_unchecked(1, &fc_event_seq);
34027 }
34028 EXPORT_SYMBOL(fc_get_event_number);
34029
34030 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34031 {
34032 int error;
34033
34034 - atomic_set(&fc_event_seq, 0);
34035 + atomic_set_unchecked(&fc_event_seq, 0);
34036
34037 error = transport_class_register(&fc_host_class);
34038 if (error)
34039 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34040 char *cp;
34041
34042 *val = simple_strtoul(buf, &cp, 0);
34043 - if ((*cp && (*cp != '\n')) || (*val < 0))
34044 + if (*cp && (*cp != '\n'))
34045 return -EINVAL;
34046 /*
34047 * Check for overflow; dev_loss_tmo is u32
34048 diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c
34049 --- linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
34050 +++ linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
34051 @@ -83,7 +83,7 @@ struct iscsi_internal {
34052 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34053 };
34054
34055 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34056 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34057 static struct workqueue_struct *iscsi_eh_timer_workq;
34058
34059 /*
34060 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34061 int err;
34062
34063 ihost = shost->shost_data;
34064 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34065 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34066
34067 if (id == ISCSI_MAX_TARGET) {
34068 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34069 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34070 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34071 ISCSI_TRANSPORT_VERSION);
34072
34073 - atomic_set(&iscsi_session_nr, 0);
34074 + atomic_set_unchecked(&iscsi_session_nr, 0);
34075
34076 err = class_register(&iscsi_transport_class);
34077 if (err)
34078 diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_srp.c linux-3.0.7/drivers/scsi/scsi_transport_srp.c
34079 --- linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
34080 +++ linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
34081 @@ -33,7 +33,7 @@
34082 #include "scsi_transport_srp_internal.h"
34083
34084 struct srp_host_attrs {
34085 - atomic_t next_port_id;
34086 + atomic_unchecked_t next_port_id;
34087 };
34088 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34089
34090 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34091 struct Scsi_Host *shost = dev_to_shost(dev);
34092 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34093
34094 - atomic_set(&srp_host->next_port_id, 0);
34095 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34096 return 0;
34097 }
34098
34099 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34100 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34101 rport->roles = ids->roles;
34102
34103 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34104 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34105 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34106
34107 transport_setup_device(&rport->dev);
34108 diff -urNp linux-3.0.7/drivers/scsi/sg.c linux-3.0.7/drivers/scsi/sg.c
34109 --- linux-3.0.7/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
34110 +++ linux-3.0.7/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
34111 @@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34112 sdp->disk->disk_name,
34113 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34114 NULL,
34115 - (char *)arg);
34116 + (char __user *)arg);
34117 case BLKTRACESTART:
34118 return blk_trace_startstop(sdp->device->request_queue, 1);
34119 case BLKTRACESTOP:
34120 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34121 const struct file_operations * fops;
34122 };
34123
34124 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34125 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34126 {"allow_dio", &adio_fops},
34127 {"debug", &debug_fops},
34128 {"def_reserved_size", &dressz_fops},
34129 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
34130 {
34131 int k, mask;
34132 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34133 - struct sg_proc_leaf * leaf;
34134 + const struct sg_proc_leaf * leaf;
34135
34136 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34137 if (!sg_proc_sgp)
34138 diff -urNp linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c
34139 --- linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
34140 +++ linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
34141 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34142 int do_iounmap = 0;
34143 int do_disable_device = 1;
34144
34145 + pax_track_stack();
34146 +
34147 memset(&sym_dev, 0, sizeof(sym_dev));
34148 memset(&nvram, 0, sizeof(nvram));
34149 sym_dev.pdev = pdev;
34150 diff -urNp linux-3.0.7/drivers/scsi/vmw_pvscsi.c linux-3.0.7/drivers/scsi/vmw_pvscsi.c
34151 --- linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
34152 +++ linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
34153 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34154 dma_addr_t base;
34155 unsigned i;
34156
34157 + pax_track_stack();
34158 +
34159 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34160 cmd.reqRingNumPages = adapter->req_pages;
34161 cmd.cmpRingNumPages = adapter->cmp_pages;
34162 diff -urNp linux-3.0.7/drivers/spi/dw_spi_pci.c linux-3.0.7/drivers/spi/dw_spi_pci.c
34163 --- linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-07-21 22:17:23.000000000 -0400
34164 +++ linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-10-11 10:44:33.000000000 -0400
34165 @@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34166 #define spi_resume NULL
34167 #endif
34168
34169 -static const struct pci_device_id pci_ids[] __devinitdata = {
34170 +static const struct pci_device_id pci_ids[] __devinitconst = {
34171 /* Intel MID platform SPI controller 0 */
34172 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34173 {},
34174 diff -urNp linux-3.0.7/drivers/spi/spi.c linux-3.0.7/drivers/spi/spi.c
34175 --- linux-3.0.7/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
34176 +++ linux-3.0.7/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
34177 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34178 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34179
34180 /* portable code must never pass more than 32 bytes */
34181 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34182 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34183
34184 static u8 *buf;
34185
34186 diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34187 --- linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
34188 +++ linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
34189 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34190 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34191
34192
34193 -static struct net_device_ops ar6000_netdev_ops = {
34194 +static net_device_ops_no_const ar6000_netdev_ops = {
34195 .ndo_init = NULL,
34196 .ndo_open = ar6000_open,
34197 .ndo_stop = ar6000_close,
34198 diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34199 --- linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
34200 +++ linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
34201 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34202 typedef struct ar6k_pal_config_s
34203 {
34204 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34205 -}ar6k_pal_config_t;
34206 +} __no_const ar6k_pal_config_t;
34207
34208 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34209 #endif /* _AR6K_PAL_H_ */
34210 diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34211 --- linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
34212 +++ linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
34213 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
34214 free_netdev(ifp->net);
34215 }
34216 /* Allocate etherdev, including space for private structure */
34217 - ifp->net = alloc_etherdev(sizeof(dhd));
34218 + ifp->net = alloc_etherdev(sizeof(*dhd));
34219 if (!ifp->net) {
34220 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34221 ret = -ENOMEM;
34222 }
34223 if (ret == 0) {
34224 strcpy(ifp->net->name, ifp->name);
34225 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
34226 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
34227 err = dhd_net_attach(&dhd->pub, ifp->idx);
34228 if (err != 0) {
34229 DHD_ERROR(("%s: dhd_net_attach failed, "
34230 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34231 strcpy(nv_path, nvram_path);
34232
34233 /* Allocate etherdev, including space for private structure */
34234 - net = alloc_etherdev(sizeof(dhd));
34235 + net = alloc_etherdev(sizeof(*dhd));
34236 if (!net) {
34237 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34238 goto fail;
34239 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34240 /*
34241 * Save the dhd_info into the priv
34242 */
34243 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34244 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34245
34246 /* Set network interface name if it was provided as module parameter */
34247 if (iface_name[0]) {
34248 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34249 /*
34250 * Save the dhd_info into the priv
34251 */
34252 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34253 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34254
34255 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
34256 g_bus = bus;
34257 diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
34258 --- linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
34259 +++ linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
34260 @@ -593,7 +593,7 @@ struct phy_func_ptr {
34261 initfn_t carrsuppr;
34262 rxsigpwrfn_t rxsigpwr;
34263 detachfn_t detach;
34264 -};
34265 +} __no_const;
34266 typedef struct phy_func_ptr phy_func_ptr_t;
34267
34268 struct phy_info {
34269 diff -urNp linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h
34270 --- linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
34271 +++ linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
34272 @@ -185,7 +185,7 @@ typedef struct {
34273 u16 func, uint bustype, void *regsva, void *param);
34274 /* detach from device */
34275 void (*detach) (void *ch);
34276 -} bcmsdh_driver_t;
34277 +} __no_const bcmsdh_driver_t;
34278
34279 /* platform specific/high level functions */
34280 extern int bcmsdh_register(bcmsdh_driver_t *driver);
34281 diff -urNp linux-3.0.7/drivers/staging/et131x/et1310_tx.c linux-3.0.7/drivers/staging/et131x/et1310_tx.c
34282 --- linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
34283 +++ linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
34284 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34285 struct net_device_stats *stats = &etdev->net_stats;
34286
34287 if (tcb->flags & fMP_DEST_BROAD)
34288 - atomic_inc(&etdev->Stats.brdcstxmt);
34289 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34290 else if (tcb->flags & fMP_DEST_MULTI)
34291 - atomic_inc(&etdev->Stats.multixmt);
34292 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34293 else
34294 - atomic_inc(&etdev->Stats.unixmt);
34295 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34296
34297 if (tcb->skb) {
34298 stats->tx_bytes += tcb->skb->len;
34299 diff -urNp linux-3.0.7/drivers/staging/et131x/et131x_adapter.h linux-3.0.7/drivers/staging/et131x/et131x_adapter.h
34300 --- linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
34301 +++ linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
34302 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
34303 * operations
34304 */
34305 u32 unircv; /* # multicast packets received */
34306 - atomic_t unixmt; /* # multicast packets for Tx */
34307 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34308 u32 multircv; /* # multicast packets received */
34309 - atomic_t multixmt; /* # multicast packets for Tx */
34310 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34311 u32 brdcstrcv; /* # broadcast packets received */
34312 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34313 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34314 u32 norcvbuf; /* # Rx packets discarded */
34315 u32 noxmtbuf; /* # Tx packets discarded */
34316
34317 diff -urNp linux-3.0.7/drivers/staging/hv/channel.c linux-3.0.7/drivers/staging/hv/channel.c
34318 --- linux-3.0.7/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
34319 +++ linux-3.0.7/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
34320 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34321 int ret = 0;
34322 int t;
34323
34324 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34325 - atomic_inc(&vmbus_connection.next_gpadl_handle);
34326 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34327 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34328
34329 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34330 if (ret)
34331 diff -urNp linux-3.0.7/drivers/staging/hv/hv.c linux-3.0.7/drivers/staging/hv/hv.c
34332 --- linux-3.0.7/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
34333 +++ linux-3.0.7/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
34334 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34335 u64 output_address = (output) ? virt_to_phys(output) : 0;
34336 u32 output_address_hi = output_address >> 32;
34337 u32 output_address_lo = output_address & 0xFFFFFFFF;
34338 - volatile void *hypercall_page = hv_context.hypercall_page;
34339 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34340
34341 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34342 "=a"(hv_status_lo) : "d" (control_hi),
34343 diff -urNp linux-3.0.7/drivers/staging/hv/hv_mouse.c linux-3.0.7/drivers/staging/hv/hv_mouse.c
34344 --- linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
34345 +++ linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
34346 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
34347 if (hid_dev) {
34348 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34349
34350 - hid_dev->ll_driver->open = mousevsc_hid_open;
34351 - hid_dev->ll_driver->close = mousevsc_hid_close;
34352 + pax_open_kernel();
34353 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34354 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34355 + pax_close_kernel();
34356
34357 hid_dev->bus = BUS_VIRTUAL;
34358 hid_dev->vendor = input_device_ctx->device_info.vendor;
34359 diff -urNp linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h
34360 --- linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
34361 +++ linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
34362 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
34363 struct vmbus_connection {
34364 enum vmbus_connect_state conn_state;
34365
34366 - atomic_t next_gpadl_handle;
34367 + atomic_unchecked_t next_gpadl_handle;
34368
34369 /*
34370 * Represents channel interrupts. Each bit position represents a
34371 diff -urNp linux-3.0.7/drivers/staging/hv/rndis_filter.c linux-3.0.7/drivers/staging/hv/rndis_filter.c
34372 --- linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
34373 +++ linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
34374 @@ -43,7 +43,7 @@ struct rndis_device {
34375
34376 enum rndis_device_state state;
34377 u32 link_stat;
34378 - atomic_t new_req_id;
34379 + atomic_unchecked_t new_req_id;
34380
34381 spinlock_t request_lock;
34382 struct list_head req_list;
34383 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34384 * template
34385 */
34386 set = &rndis_msg->msg.set_req;
34387 - set->req_id = atomic_inc_return(&dev->new_req_id);
34388 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34389
34390 /* Add to the request list */
34391 spin_lock_irqsave(&dev->request_lock, flags);
34392 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
34393
34394 /* Setup the rndis set */
34395 halt = &request->request_msg.msg.halt_req;
34396 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34397 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34398
34399 /* Ignore return since this msg is optional. */
34400 rndis_filter_send_request(dev, request);
34401 diff -urNp linux-3.0.7/drivers/staging/hv/vmbus_drv.c linux-3.0.7/drivers/staging/hv/vmbus_drv.c
34402 --- linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
34403 +++ linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
34404 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
34405 {
34406 int ret = 0;
34407
34408 - static atomic_t device_num = ATOMIC_INIT(0);
34409 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34410
34411 /* Set the device name. Otherwise, device_register() will fail. */
34412 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34413 - atomic_inc_return(&device_num));
34414 + atomic_inc_return_unchecked(&device_num));
34415
34416 /* The new device belongs to this bus */
34417 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34418 diff -urNp linux-3.0.7/drivers/staging/iio/ring_generic.h linux-3.0.7/drivers/staging/iio/ring_generic.h
34419 --- linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
34420 +++ linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
34421 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34422
34423 int (*is_enabled)(struct iio_ring_buffer *ring);
34424 int (*enable)(struct iio_ring_buffer *ring);
34425 -};
34426 +} __no_const;
34427
34428 struct iio_ring_setup_ops {
34429 int (*preenable)(struct iio_dev *);
34430 diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet.c linux-3.0.7/drivers/staging/octeon/ethernet.c
34431 --- linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
34432 +++ linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
34433 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34434 * since the RX tasklet also increments it.
34435 */
34436 #ifdef CONFIG_64BIT
34437 - atomic64_add(rx_status.dropped_packets,
34438 - (atomic64_t *)&priv->stats.rx_dropped);
34439 + atomic64_add_unchecked(rx_status.dropped_packets,
34440 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34441 #else
34442 - atomic_add(rx_status.dropped_packets,
34443 - (atomic_t *)&priv->stats.rx_dropped);
34444 + atomic_add_unchecked(rx_status.dropped_packets,
34445 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34446 #endif
34447 }
34448
34449 diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet-rx.c linux-3.0.7/drivers/staging/octeon/ethernet-rx.c
34450 --- linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
34451 +++ linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
34452 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
34453 /* Increment RX stats for virtual ports */
34454 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34455 #ifdef CONFIG_64BIT
34456 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34457 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34458 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34459 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34460 #else
34461 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34462 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34463 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34464 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34465 #endif
34466 }
34467 netif_receive_skb(skb);
34468 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
34469 dev->name);
34470 */
34471 #ifdef CONFIG_64BIT
34472 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34473 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34474 #else
34475 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34476 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34477 #endif
34478 dev_kfree_skb_irq(skb);
34479 }
34480 diff -urNp linux-3.0.7/drivers/staging/pohmelfs/inode.c linux-3.0.7/drivers/staging/pohmelfs/inode.c
34481 --- linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34482 +++ linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
34483 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
34484 mutex_init(&psb->mcache_lock);
34485 psb->mcache_root = RB_ROOT;
34486 psb->mcache_timeout = msecs_to_jiffies(5000);
34487 - atomic_long_set(&psb->mcache_gen, 0);
34488 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34489
34490 psb->trans_max_pages = 100;
34491
34492 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
34493 INIT_LIST_HEAD(&psb->crypto_ready_list);
34494 INIT_LIST_HEAD(&psb->crypto_active_list);
34495
34496 - atomic_set(&psb->trans_gen, 1);
34497 + atomic_set_unchecked(&psb->trans_gen, 1);
34498 atomic_long_set(&psb->total_inodes, 0);
34499
34500 mutex_init(&psb->state_lock);
34501 diff -urNp linux-3.0.7/drivers/staging/pohmelfs/mcache.c linux-3.0.7/drivers/staging/pohmelfs/mcache.c
34502 --- linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
34503 +++ linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
34504 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34505 m->data = data;
34506 m->start = start;
34507 m->size = size;
34508 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34509 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34510
34511 mutex_lock(&psb->mcache_lock);
34512 err = pohmelfs_mcache_insert(psb, m);
34513 diff -urNp linux-3.0.7/drivers/staging/pohmelfs/netfs.h linux-3.0.7/drivers/staging/pohmelfs/netfs.h
34514 --- linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
34515 +++ linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
34516 @@ -571,14 +571,14 @@ struct pohmelfs_config;
34517 struct pohmelfs_sb {
34518 struct rb_root mcache_root;
34519 struct mutex mcache_lock;
34520 - atomic_long_t mcache_gen;
34521 + atomic_long_unchecked_t mcache_gen;
34522 unsigned long mcache_timeout;
34523
34524 unsigned int idx;
34525
34526 unsigned int trans_retries;
34527
34528 - atomic_t trans_gen;
34529 + atomic_unchecked_t trans_gen;
34530
34531 unsigned int crypto_attached_size;
34532 unsigned int crypto_align_size;
34533 diff -urNp linux-3.0.7/drivers/staging/pohmelfs/trans.c linux-3.0.7/drivers/staging/pohmelfs/trans.c
34534 --- linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
34535 +++ linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
34536 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34537 int err;
34538 struct netfs_cmd *cmd = t->iovec.iov_base;
34539
34540 - t->gen = atomic_inc_return(&psb->trans_gen);
34541 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34542
34543 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34544 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34545 diff -urNp linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h
34546 --- linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
34547 +++ linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
34548 @@ -83,7 +83,7 @@ struct _io_ops {
34549 u8 *pmem);
34550 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
34551 u8 *pmem);
34552 -};
34553 +} __no_const;
34554
34555 struct io_req {
34556 struct list_head list;
34557 diff -urNp linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c
34558 --- linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
34559 +++ linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
34560 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
34561 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
34562
34563 if (rlen)
34564 - if (copy_to_user(data, &resp, rlen))
34565 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
34566 return -EFAULT;
34567
34568 return 0;
34569 diff -urNp linux-3.0.7/drivers/staging/tty/stallion.c linux-3.0.7/drivers/staging/tty/stallion.c
34570 --- linux-3.0.7/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
34571 +++ linux-3.0.7/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
34572 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
34573 struct stlport stl_dummyport;
34574 struct stlport *portp;
34575
34576 + pax_track_stack();
34577 +
34578 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
34579 return -EFAULT;
34580 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
34581 diff -urNp linux-3.0.7/drivers/staging/usbip/usbip_common.h linux-3.0.7/drivers/staging/usbip/usbip_common.h
34582 --- linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
34583 +++ linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
34584 @@ -315,7 +315,7 @@ struct usbip_device {
34585 void (*shutdown)(struct usbip_device *);
34586 void (*reset)(struct usbip_device *);
34587 void (*unusable)(struct usbip_device *);
34588 - } eh_ops;
34589 + } __no_const eh_ops;
34590 };
34591
34592 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
34593 diff -urNp linux-3.0.7/drivers/staging/usbip/vhci.h linux-3.0.7/drivers/staging/usbip/vhci.h
34594 --- linux-3.0.7/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
34595 +++ linux-3.0.7/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
34596 @@ -94,7 +94,7 @@ struct vhci_hcd {
34597 unsigned resuming:1;
34598 unsigned long re_timeout;
34599
34600 - atomic_t seqnum;
34601 + atomic_unchecked_t seqnum;
34602
34603 /*
34604 * NOTE:
34605 diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_hcd.c linux-3.0.7/drivers/staging/usbip/vhci_hcd.c
34606 --- linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
34607 +++ linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
34608 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
34609 return;
34610 }
34611
34612 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34613 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34614 if (priv->seqnum == 0xffff)
34615 dev_info(&urb->dev->dev, "seqnum max\n");
34616
34617 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
34618 return -ENOMEM;
34619 }
34620
34621 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34622 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34623 if (unlink->seqnum == 0xffff)
34624 pr_info("seqnum max\n");
34625
34626 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
34627 vdev->rhport = rhport;
34628 }
34629
34630 - atomic_set(&vhci->seqnum, 0);
34631 + atomic_set_unchecked(&vhci->seqnum, 0);
34632 spin_lock_init(&vhci->lock);
34633
34634 hcd->power_budget = 0; /* no limit */
34635 diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_rx.c linux-3.0.7/drivers/staging/usbip/vhci_rx.c
34636 --- linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
34637 +++ linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
34638 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
34639 if (!urb) {
34640 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
34641 pr_info("max seqnum %d\n",
34642 - atomic_read(&the_controller->seqnum));
34643 + atomic_read_unchecked(&the_controller->seqnum));
34644 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34645 return;
34646 }
34647 diff -urNp linux-3.0.7/drivers/staging/vt6655/hostap.c linux-3.0.7/drivers/staging/vt6655/hostap.c
34648 --- linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
34649 +++ linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
34650 @@ -79,14 +79,13 @@ static int msglevel
34651 *
34652 */
34653
34654 +static net_device_ops_no_const apdev_netdev_ops;
34655 +
34656 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34657 {
34658 PSDevice apdev_priv;
34659 struct net_device *dev = pDevice->dev;
34660 int ret;
34661 - const struct net_device_ops apdev_netdev_ops = {
34662 - .ndo_start_xmit = pDevice->tx_80211,
34663 - };
34664
34665 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34666
34667 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
34668 *apdev_priv = *pDevice;
34669 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34670
34671 + /* only half broken now */
34672 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34673 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34674
34675 pDevice->apdev->type = ARPHRD_IEEE80211;
34676 diff -urNp linux-3.0.7/drivers/staging/vt6656/hostap.c linux-3.0.7/drivers/staging/vt6656/hostap.c
34677 --- linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
34678 +++ linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
34679 @@ -80,14 +80,13 @@ static int msglevel
34680 *
34681 */
34682
34683 +static net_device_ops_no_const apdev_netdev_ops;
34684 +
34685 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34686 {
34687 PSDevice apdev_priv;
34688 struct net_device *dev = pDevice->dev;
34689 int ret;
34690 - const struct net_device_ops apdev_netdev_ops = {
34691 - .ndo_start_xmit = pDevice->tx_80211,
34692 - };
34693
34694 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34695
34696 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
34697 *apdev_priv = *pDevice;
34698 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34699
34700 + /* only half broken now */
34701 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34702 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34703
34704 pDevice->apdev->type = ARPHRD_IEEE80211;
34705 diff -urNp linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c
34706 --- linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
34707 +++ linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
34708 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
34709
34710 struct usbctlx_completor {
34711 int (*complete) (struct usbctlx_completor *);
34712 -};
34713 +} __no_const;
34714
34715 static int
34716 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
34717 diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.c linux-3.0.7/drivers/staging/zcache/tmem.c
34718 --- linux-3.0.7/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
34719 +++ linux-3.0.7/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
34720 @@ -39,7 +39,7 @@
34721 * A tmem host implementation must use this function to register callbacks
34722 * for memory allocation.
34723 */
34724 -static struct tmem_hostops tmem_hostops;
34725 +static tmem_hostops_no_const tmem_hostops;
34726
34727 static void tmem_objnode_tree_init(void);
34728
34729 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
34730 * A tmem host implementation must use this function to register
34731 * callbacks for a page-accessible memory (PAM) implementation
34732 */
34733 -static struct tmem_pamops tmem_pamops;
34734 +static tmem_pamops_no_const tmem_pamops;
34735
34736 void tmem_register_pamops(struct tmem_pamops *m)
34737 {
34738 diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.h linux-3.0.7/drivers/staging/zcache/tmem.h
34739 --- linux-3.0.7/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
34740 +++ linux-3.0.7/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
34741 @@ -171,6 +171,7 @@ struct tmem_pamops {
34742 int (*get_data)(struct page *, void *, struct tmem_pool *);
34743 void (*free)(void *, struct tmem_pool *);
34744 };
34745 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
34746 extern void tmem_register_pamops(struct tmem_pamops *m);
34747
34748 /* memory allocation methods provided by the host implementation */
34749 @@ -180,6 +181,7 @@ struct tmem_hostops {
34750 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
34751 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
34752 };
34753 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
34754 extern void tmem_register_hostops(struct tmem_hostops *m);
34755
34756 /* core tmem accessor functions */
34757 diff -urNp linux-3.0.7/drivers/target/target_core_alua.c linux-3.0.7/drivers/target/target_core_alua.c
34758 --- linux-3.0.7/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
34759 +++ linux-3.0.7/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
34760 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
34761 char path[ALUA_METADATA_PATH_LEN];
34762 int len;
34763
34764 + pax_track_stack();
34765 +
34766 memset(path, 0, ALUA_METADATA_PATH_LEN);
34767
34768 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
34769 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
34770 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
34771 int len;
34772
34773 + pax_track_stack();
34774 +
34775 memset(path, 0, ALUA_METADATA_PATH_LEN);
34776 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
34777
34778 diff -urNp linux-3.0.7/drivers/target/target_core_cdb.c linux-3.0.7/drivers/target/target_core_cdb.c
34779 --- linux-3.0.7/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
34780 +++ linux-3.0.7/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
34781 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
34782 int length = 0;
34783 unsigned char buf[SE_MODE_PAGE_BUF];
34784
34785 + pax_track_stack();
34786 +
34787 memset(buf, 0, SE_MODE_PAGE_BUF);
34788
34789 switch (cdb[2] & 0x3f) {
34790 diff -urNp linux-3.0.7/drivers/target/target_core_configfs.c linux-3.0.7/drivers/target/target_core_configfs.c
34791 --- linux-3.0.7/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
34792 +++ linux-3.0.7/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
34793 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
34794 ssize_t len = 0;
34795 int reg_count = 0, prf_isid;
34796
34797 + pax_track_stack();
34798 +
34799 if (!(su_dev->se_dev_ptr))
34800 return -ENODEV;
34801
34802 diff -urNp linux-3.0.7/drivers/target/target_core_pr.c linux-3.0.7/drivers/target/target_core_pr.c
34803 --- linux-3.0.7/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
34804 +++ linux-3.0.7/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
34805 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
34806 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
34807 u16 tpgt;
34808
34809 + pax_track_stack();
34810 +
34811 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
34812 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
34813 /*
34814 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
34815 ssize_t len = 0;
34816 int reg_count = 0;
34817
34818 + pax_track_stack();
34819 +
34820 memset(buf, 0, pr_aptpl_buf_len);
34821 /*
34822 * Called to clear metadata once APTPL has been deactivated.
34823 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
34824 char path[512];
34825 int ret;
34826
34827 + pax_track_stack();
34828 +
34829 memset(iov, 0, sizeof(struct iovec));
34830 memset(path, 0, 512);
34831
34832 diff -urNp linux-3.0.7/drivers/target/target_core_tmr.c linux-3.0.7/drivers/target/target_core_tmr.c
34833 --- linux-3.0.7/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
34834 +++ linux-3.0.7/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
34835 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
34836 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
34837 T_TASK(cmd)->t_task_cdbs,
34838 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34839 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34840 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34841 atomic_read(&T_TASK(cmd)->t_transport_active),
34842 atomic_read(&T_TASK(cmd)->t_transport_stop),
34843 atomic_read(&T_TASK(cmd)->t_transport_sent));
34844 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
34845 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
34846 " task: %p, t_fe_count: %d dev: %p\n", task,
34847 fe_count, dev);
34848 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34849 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34850 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
34851 flags);
34852 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34853 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
34854 }
34855 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
34856 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
34857 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34858 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34859 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
34860 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34861
34862 diff -urNp linux-3.0.7/drivers/target/target_core_transport.c linux-3.0.7/drivers/target/target_core_transport.c
34863 --- linux-3.0.7/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
34864 +++ linux-3.0.7/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
34865 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
34866
34867 dev->queue_depth = dev_limits->queue_depth;
34868 atomic_set(&dev->depth_left, dev->queue_depth);
34869 - atomic_set(&dev->dev_ordered_id, 0);
34870 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
34871
34872 se_dev_set_default_attribs(dev, dev_limits);
34873
34874 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
34875 * Used to determine when ORDERED commands should go from
34876 * Dormant to Active status.
34877 */
34878 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
34879 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
34880 smp_mb__after_atomic_inc();
34881 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
34882 cmd->se_ordered_id, cmd->sam_task_attr,
34883 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
34884 " t_transport_active: %d t_transport_stop: %d"
34885 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
34886 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34887 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34888 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34889 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
34890 atomic_read(&T_TASK(cmd)->t_transport_active),
34891 atomic_read(&T_TASK(cmd)->t_transport_stop),
34892 @@ -2673,9 +2673,9 @@ check_depth:
34893 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
34894 atomic_set(&task->task_active, 1);
34895 atomic_set(&task->task_sent, 1);
34896 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
34897 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
34898
34899 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
34900 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
34901 T_TASK(cmd)->t_task_cdbs)
34902 atomic_set(&cmd->transport_sent, 1);
34903
34904 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
34905 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
34906 }
34907 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
34908 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
34909 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
34910 goto remove;
34911
34912 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
34913 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
34914 {
34915 int ret = 0;
34916
34917 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
34918 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
34919 if (!(send_status) ||
34920 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
34921 return 1;
34922 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
34923 */
34924 if (cmd->data_direction == DMA_TO_DEVICE) {
34925 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
34926 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
34927 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
34928 smp_mb__after_atomic_inc();
34929 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
34930 transport_new_cmd_failure(cmd);
34931 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
34932 CMD_TFO(cmd)->get_task_tag(cmd),
34933 T_TASK(cmd)->t_task_cdbs,
34934 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34935 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34936 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34937 atomic_read(&T_TASK(cmd)->t_transport_active),
34938 atomic_read(&T_TASK(cmd)->t_transport_stop),
34939 atomic_read(&T_TASK(cmd)->t_transport_sent));
34940 diff -urNp linux-3.0.7/drivers/telephony/ixj.c linux-3.0.7/drivers/telephony/ixj.c
34941 --- linux-3.0.7/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
34942 +++ linux-3.0.7/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
34943 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34944 bool mContinue;
34945 char *pIn, *pOut;
34946
34947 + pax_track_stack();
34948 +
34949 if (!SCI_Prepare(j))
34950 return 0;
34951
34952 diff -urNp linux-3.0.7/drivers/tty/hvc/hvcs.c linux-3.0.7/drivers/tty/hvc/hvcs.c
34953 --- linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
34954 +++ linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
34955 @@ -83,6 +83,7 @@
34956 #include <asm/hvcserver.h>
34957 #include <asm/uaccess.h>
34958 #include <asm/vio.h>
34959 +#include <asm/local.h>
34960
34961 /*
34962 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
34963 @@ -270,7 +271,7 @@ struct hvcs_struct {
34964 unsigned int index;
34965
34966 struct tty_struct *tty;
34967 - int open_count;
34968 + local_t open_count;
34969
34970 /*
34971 * Used to tell the driver kernel_thread what operations need to take
34972 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
34973
34974 spin_lock_irqsave(&hvcsd->lock, flags);
34975
34976 - if (hvcsd->open_count > 0) {
34977 + if (local_read(&hvcsd->open_count) > 0) {
34978 spin_unlock_irqrestore(&hvcsd->lock, flags);
34979 printk(KERN_INFO "HVCS: vterm state unchanged. "
34980 "The hvcs device node is still in use.\n");
34981 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
34982 if ((retval = hvcs_partner_connect(hvcsd)))
34983 goto error_release;
34984
34985 - hvcsd->open_count = 1;
34986 + local_set(&hvcsd->open_count, 1);
34987 hvcsd->tty = tty;
34988 tty->driver_data = hvcsd;
34989
34990 @@ -1179,7 +1180,7 @@ fast_open:
34991
34992 spin_lock_irqsave(&hvcsd->lock, flags);
34993 kref_get(&hvcsd->kref);
34994 - hvcsd->open_count++;
34995 + local_inc(&hvcsd->open_count);
34996 hvcsd->todo_mask |= HVCS_SCHED_READ;
34997 spin_unlock_irqrestore(&hvcsd->lock, flags);
34998
34999 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35000 hvcsd = tty->driver_data;
35001
35002 spin_lock_irqsave(&hvcsd->lock, flags);
35003 - if (--hvcsd->open_count == 0) {
35004 + if (local_dec_and_test(&hvcsd->open_count)) {
35005
35006 vio_disable_interrupts(hvcsd->vdev);
35007
35008 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35009 free_irq(irq, hvcsd);
35010 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35011 return;
35012 - } else if (hvcsd->open_count < 0) {
35013 + } else if (local_read(&hvcsd->open_count) < 0) {
35014 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35015 " is missmanaged.\n",
35016 - hvcsd->vdev->unit_address, hvcsd->open_count);
35017 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35018 }
35019
35020 spin_unlock_irqrestore(&hvcsd->lock, flags);
35021 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35022
35023 spin_lock_irqsave(&hvcsd->lock, flags);
35024 /* Preserve this so that we know how many kref refs to put */
35025 - temp_open_count = hvcsd->open_count;
35026 + temp_open_count = local_read(&hvcsd->open_count);
35027
35028 /*
35029 * Don't kref put inside the spinlock because the destruction
35030 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35031 hvcsd->tty->driver_data = NULL;
35032 hvcsd->tty = NULL;
35033
35034 - hvcsd->open_count = 0;
35035 + local_set(&hvcsd->open_count, 0);
35036
35037 /* This will drop any buffered data on the floor which is OK in a hangup
35038 * scenario. */
35039 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35040 * the middle of a write operation? This is a crummy place to do this
35041 * but we want to keep it all in the spinlock.
35042 */
35043 - if (hvcsd->open_count <= 0) {
35044 + if (local_read(&hvcsd->open_count) <= 0) {
35045 spin_unlock_irqrestore(&hvcsd->lock, flags);
35046 return -ENODEV;
35047 }
35048 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35049 {
35050 struct hvcs_struct *hvcsd = tty->driver_data;
35051
35052 - if (!hvcsd || hvcsd->open_count <= 0)
35053 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35054 return 0;
35055
35056 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35057 diff -urNp linux-3.0.7/drivers/tty/ipwireless/tty.c linux-3.0.7/drivers/tty/ipwireless/tty.c
35058 --- linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
35059 +++ linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
35060 @@ -29,6 +29,7 @@
35061 #include <linux/tty_driver.h>
35062 #include <linux/tty_flip.h>
35063 #include <linux/uaccess.h>
35064 +#include <asm/local.h>
35065
35066 #include "tty.h"
35067 #include "network.h"
35068 @@ -51,7 +52,7 @@ struct ipw_tty {
35069 int tty_type;
35070 struct ipw_network *network;
35071 struct tty_struct *linux_tty;
35072 - int open_count;
35073 + local_t open_count;
35074 unsigned int control_lines;
35075 struct mutex ipw_tty_mutex;
35076 int tx_bytes_queued;
35077 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35078 mutex_unlock(&tty->ipw_tty_mutex);
35079 return -ENODEV;
35080 }
35081 - if (tty->open_count == 0)
35082 + if (local_read(&tty->open_count) == 0)
35083 tty->tx_bytes_queued = 0;
35084
35085 - tty->open_count++;
35086 + local_inc(&tty->open_count);
35087
35088 tty->linux_tty = linux_tty;
35089 linux_tty->driver_data = tty;
35090 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35091
35092 static void do_ipw_close(struct ipw_tty *tty)
35093 {
35094 - tty->open_count--;
35095 -
35096 - if (tty->open_count == 0) {
35097 + if (local_dec_return(&tty->open_count) == 0) {
35098 struct tty_struct *linux_tty = tty->linux_tty;
35099
35100 if (linux_tty != NULL) {
35101 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35102 return;
35103
35104 mutex_lock(&tty->ipw_tty_mutex);
35105 - if (tty->open_count == 0) {
35106 + if (local_read(&tty->open_count) == 0) {
35107 mutex_unlock(&tty->ipw_tty_mutex);
35108 return;
35109 }
35110 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35111 return;
35112 }
35113
35114 - if (!tty->open_count) {
35115 + if (!local_read(&tty->open_count)) {
35116 mutex_unlock(&tty->ipw_tty_mutex);
35117 return;
35118 }
35119 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35120 return -ENODEV;
35121
35122 mutex_lock(&tty->ipw_tty_mutex);
35123 - if (!tty->open_count) {
35124 + if (!local_read(&tty->open_count)) {
35125 mutex_unlock(&tty->ipw_tty_mutex);
35126 return -EINVAL;
35127 }
35128 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35129 if (!tty)
35130 return -ENODEV;
35131
35132 - if (!tty->open_count)
35133 + if (!local_read(&tty->open_count))
35134 return -EINVAL;
35135
35136 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35137 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35138 if (!tty)
35139 return 0;
35140
35141 - if (!tty->open_count)
35142 + if (!local_read(&tty->open_count))
35143 return 0;
35144
35145 return tty->tx_bytes_queued;
35146 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35147 if (!tty)
35148 return -ENODEV;
35149
35150 - if (!tty->open_count)
35151 + if (!local_read(&tty->open_count))
35152 return -EINVAL;
35153
35154 return get_control_lines(tty);
35155 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35156 if (!tty)
35157 return -ENODEV;
35158
35159 - if (!tty->open_count)
35160 + if (!local_read(&tty->open_count))
35161 return -EINVAL;
35162
35163 return set_control_lines(tty, set, clear);
35164 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35165 if (!tty)
35166 return -ENODEV;
35167
35168 - if (!tty->open_count)
35169 + if (!local_read(&tty->open_count))
35170 return -EINVAL;
35171
35172 /* FIXME: Exactly how is the tty object locked here .. */
35173 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35174 against a parallel ioctl etc */
35175 mutex_lock(&ttyj->ipw_tty_mutex);
35176 }
35177 - while (ttyj->open_count)
35178 + while (local_read(&ttyj->open_count))
35179 do_ipw_close(ttyj);
35180 ipwireless_disassociate_network_ttys(network,
35181 ttyj->channel_idx);
35182 diff -urNp linux-3.0.7/drivers/tty/n_gsm.c linux-3.0.7/drivers/tty/n_gsm.c
35183 --- linux-3.0.7/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
35184 +++ linux-3.0.7/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
35185 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35186 return NULL;
35187 spin_lock_init(&dlci->lock);
35188 dlci->fifo = &dlci->_fifo;
35189 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35190 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35191 kfree(dlci);
35192 return NULL;
35193 }
35194 diff -urNp linux-3.0.7/drivers/tty/n_tty.c linux-3.0.7/drivers/tty/n_tty.c
35195 --- linux-3.0.7/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
35196 +++ linux-3.0.7/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
35197 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35198 {
35199 *ops = tty_ldisc_N_TTY;
35200 ops->owner = NULL;
35201 - ops->refcount = ops->flags = 0;
35202 + atomic_set(&ops->refcount, 0);
35203 + ops->flags = 0;
35204 }
35205 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35206 diff -urNp linux-3.0.7/drivers/tty/pty.c linux-3.0.7/drivers/tty/pty.c
35207 --- linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:54:54.000000000 -0400
35208 +++ linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:55:28.000000000 -0400
35209 @@ -767,8 +767,10 @@ static void __init unix98_pty_init(void)
35210 register_sysctl_table(pty_root_table);
35211
35212 /* Now create the /dev/ptmx special device */
35213 + pax_open_kernel();
35214 tty_default_fops(&ptmx_fops);
35215 - ptmx_fops.open = ptmx_open;
35216 + *(void **)&ptmx_fops.open = ptmx_open;
35217 + pax_close_kernel();
35218
35219 cdev_init(&ptmx_cdev, &ptmx_fops);
35220 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35221 diff -urNp linux-3.0.7/drivers/tty/rocket.c linux-3.0.7/drivers/tty/rocket.c
35222 --- linux-3.0.7/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
35223 +++ linux-3.0.7/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
35224 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35225 struct rocket_ports tmp;
35226 int board;
35227
35228 + pax_track_stack();
35229 +
35230 if (!retports)
35231 return -EFAULT;
35232 memset(&tmp, 0, sizeof (tmp));
35233 diff -urNp linux-3.0.7/drivers/tty/serial/kgdboc.c linux-3.0.7/drivers/tty/serial/kgdboc.c
35234 --- linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
35235 +++ linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
35236 @@ -23,8 +23,9 @@
35237 #define MAX_CONFIG_LEN 40
35238
35239 static struct kgdb_io kgdboc_io_ops;
35240 +static struct kgdb_io kgdboc_io_ops_console;
35241
35242 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35243 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35244 static int configured = -1;
35245
35246 static char config[MAX_CONFIG_LEN];
35247 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35248 kgdboc_unregister_kbd();
35249 if (configured == 1)
35250 kgdb_unregister_io_module(&kgdboc_io_ops);
35251 + else if (configured == 2)
35252 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35253 }
35254
35255 static int configure_kgdboc(void)
35256 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35257 int err;
35258 char *cptr = config;
35259 struct console *cons;
35260 + int is_console = 0;
35261
35262 err = kgdboc_option_setup(config);
35263 if (err || !strlen(config) || isspace(config[0]))
35264 goto noconfig;
35265
35266 err = -ENODEV;
35267 - kgdboc_io_ops.is_console = 0;
35268 kgdb_tty_driver = NULL;
35269
35270 kgdboc_use_kms = 0;
35271 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35272 int idx;
35273 if (cons->device && cons->device(cons, &idx) == p &&
35274 idx == tty_line) {
35275 - kgdboc_io_ops.is_console = 1;
35276 + is_console = 1;
35277 break;
35278 }
35279 cons = cons->next;
35280 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35281 kgdb_tty_line = tty_line;
35282
35283 do_register:
35284 - err = kgdb_register_io_module(&kgdboc_io_ops);
35285 + if (is_console) {
35286 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35287 + configured = 2;
35288 + } else {
35289 + err = kgdb_register_io_module(&kgdboc_io_ops);
35290 + configured = 1;
35291 + }
35292 if (err)
35293 goto noconfig;
35294
35295 - configured = 1;
35296 -
35297 return 0;
35298
35299 noconfig:
35300 @@ -212,7 +219,7 @@ noconfig:
35301 static int __init init_kgdboc(void)
35302 {
35303 /* Already configured? */
35304 - if (configured == 1)
35305 + if (configured >= 1)
35306 return 0;
35307
35308 return configure_kgdboc();
35309 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35310 if (config[len - 1] == '\n')
35311 config[len - 1] = '\0';
35312
35313 - if (configured == 1)
35314 + if (configured >= 1)
35315 cleanup_kgdboc();
35316
35317 /* Go and configure with the new params. */
35318 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35319 .post_exception = kgdboc_post_exp_handler,
35320 };
35321
35322 +static struct kgdb_io kgdboc_io_ops_console = {
35323 + .name = "kgdboc",
35324 + .read_char = kgdboc_get_char,
35325 + .write_char = kgdboc_put_char,
35326 + .pre_exception = kgdboc_pre_exp_handler,
35327 + .post_exception = kgdboc_post_exp_handler,
35328 + .is_console = 1
35329 +};
35330 +
35331 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35332 /* This is only available if kgdboc is a built in for early debugging */
35333 static int __init kgdboc_early_init(char *opt)
35334 diff -urNp linux-3.0.7/drivers/tty/serial/mfd.c linux-3.0.7/drivers/tty/serial/mfd.c
35335 --- linux-3.0.7/drivers/tty/serial/mfd.c 2011-07-21 22:17:23.000000000 -0400
35336 +++ linux-3.0.7/drivers/tty/serial/mfd.c 2011-10-11 10:44:33.000000000 -0400
35337 @@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35338 }
35339
35340 /* First 3 are UART ports, and the 4th is the DMA */
35341 -static const struct pci_device_id pci_ids[] __devinitdata = {
35342 +static const struct pci_device_id pci_ids[] __devinitconst = {
35343 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35344 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35345 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35346 diff -urNp linux-3.0.7/drivers/tty/serial/mrst_max3110.c linux-3.0.7/drivers/tty/serial/mrst_max3110.c
35347 --- linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:54:54.000000000 -0400
35348 +++ linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:55:28.000000000 -0400
35349 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35350 int loop = 1, num, total = 0;
35351 u8 recv_buf[512], *pbuf;
35352
35353 + pax_track_stack();
35354 +
35355 pbuf = recv_buf;
35356 do {
35357 num = max3110_read_multi(max, pbuf);
35358 diff -urNp linux-3.0.7/drivers/tty/tty_io.c linux-3.0.7/drivers/tty/tty_io.c
35359 --- linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:54:54.000000000 -0400
35360 +++ linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:55:28.000000000 -0400
35361 @@ -3214,7 +3214,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35362
35363 void tty_default_fops(struct file_operations *fops)
35364 {
35365 - *fops = tty_fops;
35366 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35367 }
35368
35369 /*
35370 diff -urNp linux-3.0.7/drivers/tty/tty_ldisc.c linux-3.0.7/drivers/tty/tty_ldisc.c
35371 --- linux-3.0.7/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
35372 +++ linux-3.0.7/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
35373 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35374 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35375 struct tty_ldisc_ops *ldo = ld->ops;
35376
35377 - ldo->refcount--;
35378 + atomic_dec(&ldo->refcount);
35379 module_put(ldo->owner);
35380 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35381
35382 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35383 spin_lock_irqsave(&tty_ldisc_lock, flags);
35384 tty_ldiscs[disc] = new_ldisc;
35385 new_ldisc->num = disc;
35386 - new_ldisc->refcount = 0;
35387 + atomic_set(&new_ldisc->refcount, 0);
35388 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35389
35390 return ret;
35391 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35392 return -EINVAL;
35393
35394 spin_lock_irqsave(&tty_ldisc_lock, flags);
35395 - if (tty_ldiscs[disc]->refcount)
35396 + if (atomic_read(&tty_ldiscs[disc]->refcount))
35397 ret = -EBUSY;
35398 else
35399 tty_ldiscs[disc] = NULL;
35400 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35401 if (ldops) {
35402 ret = ERR_PTR(-EAGAIN);
35403 if (try_module_get(ldops->owner)) {
35404 - ldops->refcount++;
35405 + atomic_inc(&ldops->refcount);
35406 ret = ldops;
35407 }
35408 }
35409 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35410 unsigned long flags;
35411
35412 spin_lock_irqsave(&tty_ldisc_lock, flags);
35413 - ldops->refcount--;
35414 + atomic_dec(&ldops->refcount);
35415 module_put(ldops->owner);
35416 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35417 }
35418 diff -urNp linux-3.0.7/drivers/tty/vt/keyboard.c linux-3.0.7/drivers/tty/vt/keyboard.c
35419 --- linux-3.0.7/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
35420 +++ linux-3.0.7/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
35421 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35422 kbd->kbdmode == VC_OFF) &&
35423 value != KVAL(K_SAK))
35424 return; /* SAK is allowed even in raw mode */
35425 +
35426 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35427 + {
35428 + void *func = fn_handler[value];
35429 + if (func == fn_show_state || func == fn_show_ptregs ||
35430 + func == fn_show_mem)
35431 + return;
35432 + }
35433 +#endif
35434 +
35435 fn_handler[value](vc);
35436 }
35437
35438 diff -urNp linux-3.0.7/drivers/tty/vt/vt.c linux-3.0.7/drivers/tty/vt/vt.c
35439 --- linux-3.0.7/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
35440 +++ linux-3.0.7/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
35441 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35442
35443 static void notify_write(struct vc_data *vc, unsigned int unicode)
35444 {
35445 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35446 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
35447 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35448 }
35449
35450 diff -urNp linux-3.0.7/drivers/tty/vt/vt_ioctl.c linux-3.0.7/drivers/tty/vt/vt_ioctl.c
35451 --- linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35452 +++ linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
35453 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35454 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35455 return -EFAULT;
35456
35457 - if (!capable(CAP_SYS_TTY_CONFIG))
35458 - perm = 0;
35459 -
35460 switch (cmd) {
35461 case KDGKBENT:
35462 key_map = key_maps[s];
35463 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35464 val = (i ? K_HOLE : K_NOSUCHMAP);
35465 return put_user(val, &user_kbe->kb_value);
35466 case KDSKBENT:
35467 + if (!capable(CAP_SYS_TTY_CONFIG))
35468 + perm = 0;
35469 +
35470 if (!perm)
35471 return -EPERM;
35472 if (!i && v == K_NOSUCHMAP) {
35473 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35474 int i, j, k;
35475 int ret;
35476
35477 - if (!capable(CAP_SYS_TTY_CONFIG))
35478 - perm = 0;
35479 -
35480 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35481 if (!kbs) {
35482 ret = -ENOMEM;
35483 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35484 kfree(kbs);
35485 return ((p && *p) ? -EOVERFLOW : 0);
35486 case KDSKBSENT:
35487 + if (!capable(CAP_SYS_TTY_CONFIG))
35488 + perm = 0;
35489 +
35490 if (!perm) {
35491 ret = -EPERM;
35492 goto reterr;
35493 diff -urNp linux-3.0.7/drivers/uio/uio.c linux-3.0.7/drivers/uio/uio.c
35494 --- linux-3.0.7/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
35495 +++ linux-3.0.7/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
35496 @@ -25,6 +25,7 @@
35497 #include <linux/kobject.h>
35498 #include <linux/cdev.h>
35499 #include <linux/uio_driver.h>
35500 +#include <asm/local.h>
35501
35502 #define UIO_MAX_DEVICES (1U << MINORBITS)
35503
35504 @@ -32,10 +33,10 @@ struct uio_device {
35505 struct module *owner;
35506 struct device *dev;
35507 int minor;
35508 - atomic_t event;
35509 + atomic_unchecked_t event;
35510 struct fasync_struct *async_queue;
35511 wait_queue_head_t wait;
35512 - int vma_count;
35513 + local_t vma_count;
35514 struct uio_info *info;
35515 struct kobject *map_dir;
35516 struct kobject *portio_dir;
35517 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
35518 struct device_attribute *attr, char *buf)
35519 {
35520 struct uio_device *idev = dev_get_drvdata(dev);
35521 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35522 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35523 }
35524
35525 static struct device_attribute uio_class_attributes[] = {
35526 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
35527 {
35528 struct uio_device *idev = info->uio_dev;
35529
35530 - atomic_inc(&idev->event);
35531 + atomic_inc_unchecked(&idev->event);
35532 wake_up_interruptible(&idev->wait);
35533 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35534 }
35535 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
35536 }
35537
35538 listener->dev = idev;
35539 - listener->event_count = atomic_read(&idev->event);
35540 + listener->event_count = atomic_read_unchecked(&idev->event);
35541 filep->private_data = listener;
35542
35543 if (idev->info->open) {
35544 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
35545 return -EIO;
35546
35547 poll_wait(filep, &idev->wait, wait);
35548 - if (listener->event_count != atomic_read(&idev->event))
35549 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35550 return POLLIN | POLLRDNORM;
35551 return 0;
35552 }
35553 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
35554 do {
35555 set_current_state(TASK_INTERRUPTIBLE);
35556
35557 - event_count = atomic_read(&idev->event);
35558 + event_count = atomic_read_unchecked(&idev->event);
35559 if (event_count != listener->event_count) {
35560 if (copy_to_user(buf, &event_count, count))
35561 retval = -EFAULT;
35562 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
35563 static void uio_vma_open(struct vm_area_struct *vma)
35564 {
35565 struct uio_device *idev = vma->vm_private_data;
35566 - idev->vma_count++;
35567 + local_inc(&idev->vma_count);
35568 }
35569
35570 static void uio_vma_close(struct vm_area_struct *vma)
35571 {
35572 struct uio_device *idev = vma->vm_private_data;
35573 - idev->vma_count--;
35574 + local_dec(&idev->vma_count);
35575 }
35576
35577 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35578 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
35579 idev->owner = owner;
35580 idev->info = info;
35581 init_waitqueue_head(&idev->wait);
35582 - atomic_set(&idev->event, 0);
35583 + atomic_set_unchecked(&idev->event, 0);
35584
35585 ret = uio_get_minor(idev);
35586 if (ret)
35587 diff -urNp linux-3.0.7/drivers/usb/atm/cxacru.c linux-3.0.7/drivers/usb/atm/cxacru.c
35588 --- linux-3.0.7/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
35589 +++ linux-3.0.7/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
35590 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
35591 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35592 if (ret < 2)
35593 return -EINVAL;
35594 - if (index < 0 || index > 0x7f)
35595 + if (index > 0x7f)
35596 return -EINVAL;
35597 pos += tmp;
35598
35599 diff -urNp linux-3.0.7/drivers/usb/atm/usbatm.c linux-3.0.7/drivers/usb/atm/usbatm.c
35600 --- linux-3.0.7/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
35601 +++ linux-3.0.7/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
35602 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
35603 if (printk_ratelimit())
35604 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35605 __func__, vpi, vci);
35606 - atomic_inc(&vcc->stats->rx_err);
35607 + atomic_inc_unchecked(&vcc->stats->rx_err);
35608 return;
35609 }
35610
35611 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
35612 if (length > ATM_MAX_AAL5_PDU) {
35613 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35614 __func__, length, vcc);
35615 - atomic_inc(&vcc->stats->rx_err);
35616 + atomic_inc_unchecked(&vcc->stats->rx_err);
35617 goto out;
35618 }
35619
35620 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
35621 if (sarb->len < pdu_length) {
35622 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35623 __func__, pdu_length, sarb->len, vcc);
35624 - atomic_inc(&vcc->stats->rx_err);
35625 + atomic_inc_unchecked(&vcc->stats->rx_err);
35626 goto out;
35627 }
35628
35629 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35630 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35631 __func__, vcc);
35632 - atomic_inc(&vcc->stats->rx_err);
35633 + atomic_inc_unchecked(&vcc->stats->rx_err);
35634 goto out;
35635 }
35636
35637 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
35638 if (printk_ratelimit())
35639 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35640 __func__, length);
35641 - atomic_inc(&vcc->stats->rx_drop);
35642 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35643 goto out;
35644 }
35645
35646 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
35647
35648 vcc->push(vcc, skb);
35649
35650 - atomic_inc(&vcc->stats->rx);
35651 + atomic_inc_unchecked(&vcc->stats->rx);
35652 out:
35653 skb_trim(sarb, 0);
35654 }
35655 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
35656 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35657
35658 usbatm_pop(vcc, skb);
35659 - atomic_inc(&vcc->stats->tx);
35660 + atomic_inc_unchecked(&vcc->stats->tx);
35661
35662 skb = skb_dequeue(&instance->sndqueue);
35663 }
35664 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
35665 if (!left--)
35666 return sprintf(page,
35667 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35668 - atomic_read(&atm_dev->stats.aal5.tx),
35669 - atomic_read(&atm_dev->stats.aal5.tx_err),
35670 - atomic_read(&atm_dev->stats.aal5.rx),
35671 - atomic_read(&atm_dev->stats.aal5.rx_err),
35672 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35673 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35674 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35675 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35676 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35677 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35678
35679 if (!left--) {
35680 if (instance->disconnected)
35681 diff -urNp linux-3.0.7/drivers/usb/core/devices.c linux-3.0.7/drivers/usb/core/devices.c
35682 --- linux-3.0.7/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
35683 +++ linux-3.0.7/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
35684 @@ -126,7 +126,7 @@ static const char format_endpt[] =
35685 * time it gets called.
35686 */
35687 static struct device_connect_event {
35688 - atomic_t count;
35689 + atomic_unchecked_t count;
35690 wait_queue_head_t wait;
35691 } device_event = {
35692 .count = ATOMIC_INIT(1),
35693 @@ -164,7 +164,7 @@ static const struct class_info clas_info
35694
35695 void usbfs_conn_disc_event(void)
35696 {
35697 - atomic_add(2, &device_event.count);
35698 + atomic_add_unchecked(2, &device_event.count);
35699 wake_up(&device_event.wait);
35700 }
35701
35702 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
35703
35704 poll_wait(file, &device_event.wait, wait);
35705
35706 - event_count = atomic_read(&device_event.count);
35707 + event_count = atomic_read_unchecked(&device_event.count);
35708 if (file->f_version != event_count) {
35709 file->f_version = event_count;
35710 return POLLIN | POLLRDNORM;
35711 diff -urNp linux-3.0.7/drivers/usb/core/message.c linux-3.0.7/drivers/usb/core/message.c
35712 --- linux-3.0.7/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
35713 +++ linux-3.0.7/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
35714 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
35715 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35716 if (buf) {
35717 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35718 - if (len > 0) {
35719 - smallbuf = kmalloc(++len, GFP_NOIO);
35720 + if (len++ > 0) {
35721 + smallbuf = kmalloc(len, GFP_NOIO);
35722 if (!smallbuf)
35723 return buf;
35724 memcpy(smallbuf, buf, len);
35725 diff -urNp linux-3.0.7/drivers/usb/early/ehci-dbgp.c linux-3.0.7/drivers/usb/early/ehci-dbgp.c
35726 --- linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
35727 +++ linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
35728 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
35729
35730 #ifdef CONFIG_KGDB
35731 static struct kgdb_io kgdbdbgp_io_ops;
35732 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
35733 +static struct kgdb_io kgdbdbgp_io_ops_console;
35734 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
35735 #else
35736 #define dbgp_kgdb_mode (0)
35737 #endif
35738 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
35739 .write_char = kgdbdbgp_write_char,
35740 };
35741
35742 +static struct kgdb_io kgdbdbgp_io_ops_console = {
35743 + .name = "kgdbdbgp",
35744 + .read_char = kgdbdbgp_read_char,
35745 + .write_char = kgdbdbgp_write_char,
35746 + .is_console = 1
35747 +};
35748 +
35749 static int kgdbdbgp_wait_time;
35750
35751 static int __init kgdbdbgp_parse_config(char *str)
35752 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
35753 ptr++;
35754 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
35755 }
35756 - kgdb_register_io_module(&kgdbdbgp_io_ops);
35757 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
35758 + if (early_dbgp_console.index != -1)
35759 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
35760 + else
35761 + kgdb_register_io_module(&kgdbdbgp_io_ops);
35762
35763 return 0;
35764 }
35765 diff -urNp linux-3.0.7/drivers/usb/host/xhci-mem.c linux-3.0.7/drivers/usb/host/xhci-mem.c
35766 --- linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
35767 +++ linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
35768 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
35769 unsigned int num_tests;
35770 int i, ret;
35771
35772 + pax_track_stack();
35773 +
35774 num_tests = ARRAY_SIZE(simple_test_vector);
35775 for (i = 0; i < num_tests; i++) {
35776 ret = xhci_test_trb_in_td(xhci,
35777 diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-hc.h linux-3.0.7/drivers/usb/wusbcore/wa-hc.h
35778 --- linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
35779 +++ linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
35780 @@ -192,7 +192,7 @@ struct wahc {
35781 struct list_head xfer_delayed_list;
35782 spinlock_t xfer_list_lock;
35783 struct work_struct xfer_work;
35784 - atomic_t xfer_id_count;
35785 + atomic_unchecked_t xfer_id_count;
35786 };
35787
35788
35789 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35790 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35791 spin_lock_init(&wa->xfer_list_lock);
35792 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35793 - atomic_set(&wa->xfer_id_count, 1);
35794 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35795 }
35796
35797 /**
35798 diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c
35799 --- linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
35800 +++ linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
35801 @@ -294,7 +294,7 @@ out:
35802 */
35803 static void wa_xfer_id_init(struct wa_xfer *xfer)
35804 {
35805 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35806 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35807 }
35808
35809 /*
35810 diff -urNp linux-3.0.7/drivers/vhost/vhost.c linux-3.0.7/drivers/vhost/vhost.c
35811 --- linux-3.0.7/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
35812 +++ linux-3.0.7/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
35813 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
35814 return get_user(vq->last_used_idx, &used->idx);
35815 }
35816
35817 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
35818 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
35819 {
35820 struct file *eventfp, *filep = NULL,
35821 *pollstart = NULL, *pollstop = NULL;
35822 diff -urNp linux-3.0.7/drivers/video/aty/aty128fb.c linux-3.0.7/drivers/video/aty/aty128fb.c
35823 --- linux-3.0.7/drivers/video/aty/aty128fb.c 2011-07-21 22:17:23.000000000 -0400
35824 +++ linux-3.0.7/drivers/video/aty/aty128fb.c 2011-10-11 10:44:33.000000000 -0400
35825 @@ -148,7 +148,7 @@ enum {
35826 };
35827
35828 /* Must match above enum */
35829 -static const char *r128_family[] __devinitdata = {
35830 +static const char *r128_family[] __devinitconst = {
35831 "AGP",
35832 "PCI",
35833 "PRO AGP",
35834 diff -urNp linux-3.0.7/drivers/video/fbcmap.c linux-3.0.7/drivers/video/fbcmap.c
35835 --- linux-3.0.7/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
35836 +++ linux-3.0.7/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
35837 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35838 rc = -ENODEV;
35839 goto out;
35840 }
35841 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35842 - !info->fbops->fb_setcmap)) {
35843 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35844 rc = -EINVAL;
35845 goto out1;
35846 }
35847 diff -urNp linux-3.0.7/drivers/video/fbmem.c linux-3.0.7/drivers/video/fbmem.c
35848 --- linux-3.0.7/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
35849 +++ linux-3.0.7/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
35850 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
35851 image->dx += image->width + 8;
35852 }
35853 } else if (rotate == FB_ROTATE_UD) {
35854 - for (x = 0; x < num && image->dx >= 0; x++) {
35855 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35856 info->fbops->fb_imageblit(info, image);
35857 image->dx -= image->width + 8;
35858 }
35859 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
35860 image->dy += image->height + 8;
35861 }
35862 } else if (rotate == FB_ROTATE_CCW) {
35863 - for (x = 0; x < num && image->dy >= 0; x++) {
35864 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35865 info->fbops->fb_imageblit(info, image);
35866 image->dy -= image->height + 8;
35867 }
35868 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
35869 int flags = info->flags;
35870 int ret = 0;
35871
35872 + pax_track_stack();
35873 +
35874 if (var->activate & FB_ACTIVATE_INV_MODE) {
35875 struct fb_videomode mode1, mode2;
35876
35877 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
35878 void __user *argp = (void __user *)arg;
35879 long ret = 0;
35880
35881 + pax_track_stack();
35882 +
35883 switch (cmd) {
35884 case FBIOGET_VSCREENINFO:
35885 if (!lock_fb_info(info))
35886 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
35887 return -EFAULT;
35888 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35889 return -EINVAL;
35890 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35891 + if (con2fb.framebuffer >= FB_MAX)
35892 return -EINVAL;
35893 if (!registered_fb[con2fb.framebuffer])
35894 request_module("fb%d", con2fb.framebuffer);
35895 diff -urNp linux-3.0.7/drivers/video/geode/gx1fb_core.c linux-3.0.7/drivers/video/geode/gx1fb_core.c
35896 --- linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-07-21 22:17:23.000000000 -0400
35897 +++ linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-10-11 10:44:33.000000000 -0400
35898 @@ -29,7 +29,7 @@ static int crt_option = 1;
35899 static char panel_option[32] = "";
35900
35901 /* Modes relevant to the GX1 (taken from modedb.c) */
35902 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
35903 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
35904 /* 640x480-60 VESA */
35905 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
35906 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
35907 diff -urNp linux-3.0.7/drivers/video/gxt4500.c linux-3.0.7/drivers/video/gxt4500.c
35908 --- linux-3.0.7/drivers/video/gxt4500.c 2011-07-21 22:17:23.000000000 -0400
35909 +++ linux-3.0.7/drivers/video/gxt4500.c 2011-10-11 10:44:33.000000000 -0400
35910 @@ -156,7 +156,7 @@ struct gxt4500_par {
35911 static char *mode_option;
35912
35913 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
35914 -static const struct fb_videomode defaultmode __devinitdata = {
35915 +static const struct fb_videomode defaultmode __devinitconst = {
35916 .refresh = 60,
35917 .xres = 1280,
35918 .yres = 1024,
35919 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
35920 return 0;
35921 }
35922
35923 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
35924 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
35925 .id = "IBM GXT4500P",
35926 .type = FB_TYPE_PACKED_PIXELS,
35927 .visual = FB_VISUAL_PSEUDOCOLOR,
35928 diff -urNp linux-3.0.7/drivers/video/i810/i810_accel.c linux-3.0.7/drivers/video/i810/i810_accel.c
35929 --- linux-3.0.7/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
35930 +++ linux-3.0.7/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
35931 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35932 }
35933 }
35934 printk("ringbuffer lockup!!!\n");
35935 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35936 i810_report_error(mmio);
35937 par->dev_flags |= LOCKUP;
35938 info->pixmap.scan_align = 1;
35939 diff -urNp linux-3.0.7/drivers/video/i810/i810_main.c linux-3.0.7/drivers/video/i810/i810_main.c
35940 --- linux-3.0.7/drivers/video/i810/i810_main.c 2011-07-21 22:17:23.000000000 -0400
35941 +++ linux-3.0.7/drivers/video/i810/i810_main.c 2011-10-11 10:44:33.000000000 -0400
35942 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
35943 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
35944
35945 /* PCI */
35946 -static const char *i810_pci_list[] __devinitdata = {
35947 +static const char *i810_pci_list[] __devinitconst = {
35948 "Intel(R) 810 Framebuffer Device" ,
35949 "Intel(R) 810-DC100 Framebuffer Device" ,
35950 "Intel(R) 810E Framebuffer Device" ,
35951 diff -urNp linux-3.0.7/drivers/video/jz4740_fb.c linux-3.0.7/drivers/video/jz4740_fb.c
35952 --- linux-3.0.7/drivers/video/jz4740_fb.c 2011-07-21 22:17:23.000000000 -0400
35953 +++ linux-3.0.7/drivers/video/jz4740_fb.c 2011-10-11 10:44:33.000000000 -0400
35954 @@ -136,7 +136,7 @@ struct jzfb {
35955 uint32_t pseudo_palette[16];
35956 };
35957
35958 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
35959 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
35960 .id = "JZ4740 FB",
35961 .type = FB_TYPE_PACKED_PIXELS,
35962 .visual = FB_VISUAL_TRUECOLOR,
35963 diff -urNp linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm
35964 --- linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
35965 +++ linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
35966 @@ -1,1604 +1,1123 @@
35967 P3
35968 -# Standard 224-color Linux logo
35969 80 80
35970 255
35971 - 0 0 0 0 0 0 0 0 0 0 0 0
35972 - 0 0 0 0 0 0 0 0 0 0 0 0
35973 - 0 0 0 0 0 0 0 0 0 0 0 0
35974 - 0 0 0 0 0 0 0 0 0 0 0 0
35975 - 0 0 0 0 0 0 0 0 0 0 0 0
35976 - 0 0 0 0 0 0 0 0 0 0 0 0
35977 - 0 0 0 0 0 0 0 0 0 0 0 0
35978 - 0 0 0 0 0 0 0 0 0 0 0 0
35979 - 0 0 0 0 0 0 0 0 0 0 0 0
35980 - 6 6 6 6 6 6 10 10 10 10 10 10
35981 - 10 10 10 6 6 6 6 6 6 6 6 6
35982 - 0 0 0 0 0 0 0 0 0 0 0 0
35983 - 0 0 0 0 0 0 0 0 0 0 0 0
35984 - 0 0 0 0 0 0 0 0 0 0 0 0
35985 - 0 0 0 0 0 0 0 0 0 0 0 0
35986 - 0 0 0 0 0 0 0 0 0 0 0 0
35987 - 0 0 0 0 0 0 0 0 0 0 0 0
35988 - 0 0 0 0 0 0 0 0 0 0 0 0
35989 - 0 0 0 0 0 0 0 0 0 0 0 0
35990 - 0 0 0 0 0 0 0 0 0 0 0 0
35991 - 0 0 0 0 0 0 0 0 0 0 0 0
35992 - 0 0 0 0 0 0 0 0 0 0 0 0
35993 - 0 0 0 0 0 0 0 0 0 0 0 0
35994 - 0 0 0 0 0 0 0 0 0 0 0 0
35995 - 0 0 0 0 0 0 0 0 0 0 0 0
35996 - 0 0 0 0 0 0 0 0 0 0 0 0
35997 - 0 0 0 0 0 0 0 0 0 0 0 0
35998 - 0 0 0 0 0 0 0 0 0 0 0 0
35999 - 0 0 0 6 6 6 10 10 10 14 14 14
36000 - 22 22 22 26 26 26 30 30 30 34 34 34
36001 - 30 30 30 30 30 30 26 26 26 18 18 18
36002 - 14 14 14 10 10 10 6 6 6 0 0 0
36003 - 0 0 0 0 0 0 0 0 0 0 0 0
36004 - 0 0 0 0 0 0 0 0 0 0 0 0
36005 - 0 0 0 0 0 0 0 0 0 0 0 0
36006 - 0 0 0 0 0 0 0 0 0 0 0 0
36007 - 0 0 0 0 0 0 0 0 0 0 0 0
36008 - 0 0 0 0 0 0 0 0 0 0 0 0
36009 - 0 0 0 0 0 0 0 0 0 0 0 0
36010 - 0 0 0 0 0 0 0 0 0 0 0 0
36011 - 0 0 0 0 0 0 0 0 0 0 0 0
36012 - 0 0 0 0 0 1 0 0 1 0 0 0
36013 - 0 0 0 0 0 0 0 0 0 0 0 0
36014 - 0 0 0 0 0 0 0 0 0 0 0 0
36015 - 0 0 0 0 0 0 0 0 0 0 0 0
36016 - 0 0 0 0 0 0 0 0 0 0 0 0
36017 - 0 0 0 0 0 0 0 0 0 0 0 0
36018 - 0 0 0 0 0 0 0 0 0 0 0 0
36019 - 6 6 6 14 14 14 26 26 26 42 42 42
36020 - 54 54 54 66 66 66 78 78 78 78 78 78
36021 - 78 78 78 74 74 74 66 66 66 54 54 54
36022 - 42 42 42 26 26 26 18 18 18 10 10 10
36023 - 6 6 6 0 0 0 0 0 0 0 0 0
36024 - 0 0 0 0 0 0 0 0 0 0 0 0
36025 - 0 0 0 0 0 0 0 0 0 0 0 0
36026 - 0 0 0 0 0 0 0 0 0 0 0 0
36027 - 0 0 0 0 0 0 0 0 0 0 0 0
36028 - 0 0 0 0 0 0 0 0 0 0 0 0
36029 - 0 0 0 0 0 0 0 0 0 0 0 0
36030 - 0 0 0 0 0 0 0 0 0 0 0 0
36031 - 0 0 0 0 0 0 0 0 0 0 0 0
36032 - 0 0 1 0 0 0 0 0 0 0 0 0
36033 - 0 0 0 0 0 0 0 0 0 0 0 0
36034 - 0 0 0 0 0 0 0 0 0 0 0 0
36035 - 0 0 0 0 0 0 0 0 0 0 0 0
36036 - 0 0 0 0 0 0 0 0 0 0 0 0
36037 - 0 0 0 0 0 0 0 0 0 0 0 0
36038 - 0 0 0 0 0 0 0 0 0 10 10 10
36039 - 22 22 22 42 42 42 66 66 66 86 86 86
36040 - 66 66 66 38 38 38 38 38 38 22 22 22
36041 - 26 26 26 34 34 34 54 54 54 66 66 66
36042 - 86 86 86 70 70 70 46 46 46 26 26 26
36043 - 14 14 14 6 6 6 0 0 0 0 0 0
36044 - 0 0 0 0 0 0 0 0 0 0 0 0
36045 - 0 0 0 0 0 0 0 0 0 0 0 0
36046 - 0 0 0 0 0 0 0 0 0 0 0 0
36047 - 0 0 0 0 0 0 0 0 0 0 0 0
36048 - 0 0 0 0 0 0 0 0 0 0 0 0
36049 - 0 0 0 0 0 0 0 0 0 0 0 0
36050 - 0 0 0 0 0 0 0 0 0 0 0 0
36051 - 0 0 0 0 0 0 0 0 0 0 0 0
36052 - 0 0 1 0 0 1 0 0 1 0 0 0
36053 - 0 0 0 0 0 0 0 0 0 0 0 0
36054 - 0 0 0 0 0 0 0 0 0 0 0 0
36055 - 0 0 0 0 0 0 0 0 0 0 0 0
36056 - 0 0 0 0 0 0 0 0 0 0 0 0
36057 - 0 0 0 0 0 0 0 0 0 0 0 0
36058 - 0 0 0 0 0 0 10 10 10 26 26 26
36059 - 50 50 50 82 82 82 58 58 58 6 6 6
36060 - 2 2 6 2 2 6 2 2 6 2 2 6
36061 - 2 2 6 2 2 6 2 2 6 2 2 6
36062 - 6 6 6 54 54 54 86 86 86 66 66 66
36063 - 38 38 38 18 18 18 6 6 6 0 0 0
36064 - 0 0 0 0 0 0 0 0 0 0 0 0
36065 - 0 0 0 0 0 0 0 0 0 0 0 0
36066 - 0 0 0 0 0 0 0 0 0 0 0 0
36067 - 0 0 0 0 0 0 0 0 0 0 0 0
36068 - 0 0 0 0 0 0 0 0 0 0 0 0
36069 - 0 0 0 0 0 0 0 0 0 0 0 0
36070 - 0 0 0 0 0 0 0 0 0 0 0 0
36071 - 0 0 0 0 0 0 0 0 0 0 0 0
36072 - 0 0 0 0 0 0 0 0 0 0 0 0
36073 - 0 0 0 0 0 0 0 0 0 0 0 0
36074 - 0 0 0 0 0 0 0 0 0 0 0 0
36075 - 0 0 0 0 0 0 0 0 0 0 0 0
36076 - 0 0 0 0 0 0 0 0 0 0 0 0
36077 - 0 0 0 0 0 0 0 0 0 0 0 0
36078 - 0 0 0 6 6 6 22 22 22 50 50 50
36079 - 78 78 78 34 34 34 2 2 6 2 2 6
36080 - 2 2 6 2 2 6 2 2 6 2 2 6
36081 - 2 2 6 2 2 6 2 2 6 2 2 6
36082 - 2 2 6 2 2 6 6 6 6 70 70 70
36083 - 78 78 78 46 46 46 22 22 22 6 6 6
36084 - 0 0 0 0 0 0 0 0 0 0 0 0
36085 - 0 0 0 0 0 0 0 0 0 0 0 0
36086 - 0 0 0 0 0 0 0 0 0 0 0 0
36087 - 0 0 0 0 0 0 0 0 0 0 0 0
36088 - 0 0 0 0 0 0 0 0 0 0 0 0
36089 - 0 0 0 0 0 0 0 0 0 0 0 0
36090 - 0 0 0 0 0 0 0 0 0 0 0 0
36091 - 0 0 0 0 0 0 0 0 0 0 0 0
36092 - 0 0 1 0 0 1 0 0 1 0 0 0
36093 - 0 0 0 0 0 0 0 0 0 0 0 0
36094 - 0 0 0 0 0 0 0 0 0 0 0 0
36095 - 0 0 0 0 0 0 0 0 0 0 0 0
36096 - 0 0 0 0 0 0 0 0 0 0 0 0
36097 - 0 0 0 0 0 0 0 0 0 0 0 0
36098 - 6 6 6 18 18 18 42 42 42 82 82 82
36099 - 26 26 26 2 2 6 2 2 6 2 2 6
36100 - 2 2 6 2 2 6 2 2 6 2 2 6
36101 - 2 2 6 2 2 6 2 2 6 14 14 14
36102 - 46 46 46 34 34 34 6 6 6 2 2 6
36103 - 42 42 42 78 78 78 42 42 42 18 18 18
36104 - 6 6 6 0 0 0 0 0 0 0 0 0
36105 - 0 0 0 0 0 0 0 0 0 0 0 0
36106 - 0 0 0 0 0 0 0 0 0 0 0 0
36107 - 0 0 0 0 0 0 0 0 0 0 0 0
36108 - 0 0 0 0 0 0 0 0 0 0 0 0
36109 - 0 0 0 0 0 0 0 0 0 0 0 0
36110 - 0 0 0 0 0 0 0 0 0 0 0 0
36111 - 0 0 0 0 0 0 0 0 0 0 0 0
36112 - 0 0 1 0 0 0 0 0 1 0 0 0
36113 - 0 0 0 0 0 0 0 0 0 0 0 0
36114 - 0 0 0 0 0 0 0 0 0 0 0 0
36115 - 0 0 0 0 0 0 0 0 0 0 0 0
36116 - 0 0 0 0 0 0 0 0 0 0 0 0
36117 - 0 0 0 0 0 0 0 0 0 0 0 0
36118 - 10 10 10 30 30 30 66 66 66 58 58 58
36119 - 2 2 6 2 2 6 2 2 6 2 2 6
36120 - 2 2 6 2 2 6 2 2 6 2 2 6
36121 - 2 2 6 2 2 6 2 2 6 26 26 26
36122 - 86 86 86 101 101 101 46 46 46 10 10 10
36123 - 2 2 6 58 58 58 70 70 70 34 34 34
36124 - 10 10 10 0 0 0 0 0 0 0 0 0
36125 - 0 0 0 0 0 0 0 0 0 0 0 0
36126 - 0 0 0 0 0 0 0 0 0 0 0 0
36127 - 0 0 0 0 0 0 0 0 0 0 0 0
36128 - 0 0 0 0 0 0 0 0 0 0 0 0
36129 - 0 0 0 0 0 0 0 0 0 0 0 0
36130 - 0 0 0 0 0 0 0 0 0 0 0 0
36131 - 0 0 0 0 0 0 0 0 0 0 0 0
36132 - 0 0 1 0 0 1 0 0 1 0 0 0
36133 - 0 0 0 0 0 0 0 0 0 0 0 0
36134 - 0 0 0 0 0 0 0 0 0 0 0 0
36135 - 0 0 0 0 0 0 0 0 0 0 0 0
36136 - 0 0 0 0 0 0 0 0 0 0 0 0
36137 - 0 0 0 0 0 0 0 0 0 0 0 0
36138 - 14 14 14 42 42 42 86 86 86 10 10 10
36139 - 2 2 6 2 2 6 2 2 6 2 2 6
36140 - 2 2 6 2 2 6 2 2 6 2 2 6
36141 - 2 2 6 2 2 6 2 2 6 30 30 30
36142 - 94 94 94 94 94 94 58 58 58 26 26 26
36143 - 2 2 6 6 6 6 78 78 78 54 54 54
36144 - 22 22 22 6 6 6 0 0 0 0 0 0
36145 - 0 0 0 0 0 0 0 0 0 0 0 0
36146 - 0 0 0 0 0 0 0 0 0 0 0 0
36147 - 0 0 0 0 0 0 0 0 0 0 0 0
36148 - 0 0 0 0 0 0 0 0 0 0 0 0
36149 - 0 0 0 0 0 0 0 0 0 0 0 0
36150 - 0 0 0 0 0 0 0 0 0 0 0 0
36151 - 0 0 0 0 0 0 0 0 0 0 0 0
36152 - 0 0 0 0 0 0 0 0 0 0 0 0
36153 - 0 0 0 0 0 0 0 0 0 0 0 0
36154 - 0 0 0 0 0 0 0 0 0 0 0 0
36155 - 0 0 0 0 0 0 0 0 0 0 0 0
36156 - 0 0 0 0 0 0 0 0 0 0 0 0
36157 - 0 0 0 0 0 0 0 0 0 6 6 6
36158 - 22 22 22 62 62 62 62 62 62 2 2 6
36159 - 2 2 6 2 2 6 2 2 6 2 2 6
36160 - 2 2 6 2 2 6 2 2 6 2 2 6
36161 - 2 2 6 2 2 6 2 2 6 26 26 26
36162 - 54 54 54 38 38 38 18 18 18 10 10 10
36163 - 2 2 6 2 2 6 34 34 34 82 82 82
36164 - 38 38 38 14 14 14 0 0 0 0 0 0
36165 - 0 0 0 0 0 0 0 0 0 0 0 0
36166 - 0 0 0 0 0 0 0 0 0 0 0 0
36167 - 0 0 0 0 0 0 0 0 0 0 0 0
36168 - 0 0 0 0 0 0 0 0 0 0 0 0
36169 - 0 0 0 0 0 0 0 0 0 0 0 0
36170 - 0 0 0 0 0 0 0 0 0 0 0 0
36171 - 0 0 0 0 0 0 0 0 0 0 0 0
36172 - 0 0 0 0 0 1 0 0 1 0 0 0
36173 - 0 0 0 0 0 0 0 0 0 0 0 0
36174 - 0 0 0 0 0 0 0 0 0 0 0 0
36175 - 0 0 0 0 0 0 0 0 0 0 0 0
36176 - 0 0 0 0 0 0 0 0 0 0 0 0
36177 - 0 0 0 0 0 0 0 0 0 6 6 6
36178 - 30 30 30 78 78 78 30 30 30 2 2 6
36179 - 2 2 6 2 2 6 2 2 6 2 2 6
36180 - 2 2 6 2 2 6 2 2 6 2 2 6
36181 - 2 2 6 2 2 6 2 2 6 10 10 10
36182 - 10 10 10 2 2 6 2 2 6 2 2 6
36183 - 2 2 6 2 2 6 2 2 6 78 78 78
36184 - 50 50 50 18 18 18 6 6 6 0 0 0
36185 - 0 0 0 0 0 0 0 0 0 0 0 0
36186 - 0 0 0 0 0 0 0 0 0 0 0 0
36187 - 0 0 0 0 0 0 0 0 0 0 0 0
36188 - 0 0 0 0 0 0 0 0 0 0 0 0
36189 - 0 0 0 0 0 0 0 0 0 0 0 0
36190 - 0 0 0 0 0 0 0 0 0 0 0 0
36191 - 0 0 0 0 0 0 0 0 0 0 0 0
36192 - 0 0 1 0 0 0 0 0 0 0 0 0
36193 - 0 0 0 0 0 0 0 0 0 0 0 0
36194 - 0 0 0 0 0 0 0 0 0 0 0 0
36195 - 0 0 0 0 0 0 0 0 0 0 0 0
36196 - 0 0 0 0 0 0 0 0 0 0 0 0
36197 - 0 0 0 0 0 0 0 0 0 10 10 10
36198 - 38 38 38 86 86 86 14 14 14 2 2 6
36199 - 2 2 6 2 2 6 2 2 6 2 2 6
36200 - 2 2 6 2 2 6 2 2 6 2 2 6
36201 - 2 2 6 2 2 6 2 2 6 2 2 6
36202 - 2 2 6 2 2 6 2 2 6 2 2 6
36203 - 2 2 6 2 2 6 2 2 6 54 54 54
36204 - 66 66 66 26 26 26 6 6 6 0 0 0
36205 - 0 0 0 0 0 0 0 0 0 0 0 0
36206 - 0 0 0 0 0 0 0 0 0 0 0 0
36207 - 0 0 0 0 0 0 0 0 0 0 0 0
36208 - 0 0 0 0 0 0 0 0 0 0 0 0
36209 - 0 0 0 0 0 0 0 0 0 0 0 0
36210 - 0 0 0 0 0 0 0 0 0 0 0 0
36211 - 0 0 0 0 0 0 0 0 0 0 0 0
36212 - 0 0 0 0 0 1 0 0 1 0 0 0
36213 - 0 0 0 0 0 0 0 0 0 0 0 0
36214 - 0 0 0 0 0 0 0 0 0 0 0 0
36215 - 0 0 0 0 0 0 0 0 0 0 0 0
36216 - 0 0 0 0 0 0 0 0 0 0 0 0
36217 - 0 0 0 0 0 0 0 0 0 14 14 14
36218 - 42 42 42 82 82 82 2 2 6 2 2 6
36219 - 2 2 6 6 6 6 10 10 10 2 2 6
36220 - 2 2 6 2 2 6 2 2 6 2 2 6
36221 - 2 2 6 2 2 6 2 2 6 6 6 6
36222 - 14 14 14 10 10 10 2 2 6 2 2 6
36223 - 2 2 6 2 2 6 2 2 6 18 18 18
36224 - 82 82 82 34 34 34 10 10 10 0 0 0
36225 - 0 0 0 0 0 0 0 0 0 0 0 0
36226 - 0 0 0 0 0 0 0 0 0 0 0 0
36227 - 0 0 0 0 0 0 0 0 0 0 0 0
36228 - 0 0 0 0 0 0 0 0 0 0 0 0
36229 - 0 0 0 0 0 0 0 0 0 0 0 0
36230 - 0 0 0 0 0 0 0 0 0 0 0 0
36231 - 0 0 0 0 0 0 0 0 0 0 0 0
36232 - 0 0 1 0 0 0 0 0 0 0 0 0
36233 - 0 0 0 0 0 0 0 0 0 0 0 0
36234 - 0 0 0 0 0 0 0 0 0 0 0 0
36235 - 0 0 0 0 0 0 0 0 0 0 0 0
36236 - 0 0 0 0 0 0 0 0 0 0 0 0
36237 - 0 0 0 0 0 0 0 0 0 14 14 14
36238 - 46 46 46 86 86 86 2 2 6 2 2 6
36239 - 6 6 6 6 6 6 22 22 22 34 34 34
36240 - 6 6 6 2 2 6 2 2 6 2 2 6
36241 - 2 2 6 2 2 6 18 18 18 34 34 34
36242 - 10 10 10 50 50 50 22 22 22 2 2 6
36243 - 2 2 6 2 2 6 2 2 6 10 10 10
36244 - 86 86 86 42 42 42 14 14 14 0 0 0
36245 - 0 0 0 0 0 0 0 0 0 0 0 0
36246 - 0 0 0 0 0 0 0 0 0 0 0 0
36247 - 0 0 0 0 0 0 0 0 0 0 0 0
36248 - 0 0 0 0 0 0 0 0 0 0 0 0
36249 - 0 0 0 0 0 0 0 0 0 0 0 0
36250 - 0 0 0 0 0 0 0 0 0 0 0 0
36251 - 0 0 0 0 0 0 0 0 0 0 0 0
36252 - 0 0 1 0 0 1 0 0 1 0 0 0
36253 - 0 0 0 0 0 0 0 0 0 0 0 0
36254 - 0 0 0 0 0 0 0 0 0 0 0 0
36255 - 0 0 0 0 0 0 0 0 0 0 0 0
36256 - 0 0 0 0 0 0 0 0 0 0 0 0
36257 - 0 0 0 0 0 0 0 0 0 14 14 14
36258 - 46 46 46 86 86 86 2 2 6 2 2 6
36259 - 38 38 38 116 116 116 94 94 94 22 22 22
36260 - 22 22 22 2 2 6 2 2 6 2 2 6
36261 - 14 14 14 86 86 86 138 138 138 162 162 162
36262 -154 154 154 38 38 38 26 26 26 6 6 6
36263 - 2 2 6 2 2 6 2 2 6 2 2 6
36264 - 86 86 86 46 46 46 14 14 14 0 0 0
36265 - 0 0 0 0 0 0 0 0 0 0 0 0
36266 - 0 0 0 0 0 0 0 0 0 0 0 0
36267 - 0 0 0 0 0 0 0 0 0 0 0 0
36268 - 0 0 0 0 0 0 0 0 0 0 0 0
36269 - 0 0 0 0 0 0 0 0 0 0 0 0
36270 - 0 0 0 0 0 0 0 0 0 0 0 0
36271 - 0 0 0 0 0 0 0 0 0 0 0 0
36272 - 0 0 0 0 0 0 0 0 0 0 0 0
36273 - 0 0 0 0 0 0 0 0 0 0 0 0
36274 - 0 0 0 0 0 0 0 0 0 0 0 0
36275 - 0 0 0 0 0 0 0 0 0 0 0 0
36276 - 0 0 0 0 0 0 0 0 0 0 0 0
36277 - 0 0 0 0 0 0 0 0 0 14 14 14
36278 - 46 46 46 86 86 86 2 2 6 14 14 14
36279 -134 134 134 198 198 198 195 195 195 116 116 116
36280 - 10 10 10 2 2 6 2 2 6 6 6 6
36281 -101 98 89 187 187 187 210 210 210 218 218 218
36282 -214 214 214 134 134 134 14 14 14 6 6 6
36283 - 2 2 6 2 2 6 2 2 6 2 2 6
36284 - 86 86 86 50 50 50 18 18 18 6 6 6
36285 - 0 0 0 0 0 0 0 0 0 0 0 0
36286 - 0 0 0 0 0 0 0 0 0 0 0 0
36287 - 0 0 0 0 0 0 0 0 0 0 0 0
36288 - 0 0 0 0 0 0 0 0 0 0 0 0
36289 - 0 0 0 0 0 0 0 0 0 0 0 0
36290 - 0 0 0 0 0 0 0 0 0 0 0 0
36291 - 0 0 0 0 0 0 0 0 1 0 0 0
36292 - 0 0 1 0 0 1 0 0 1 0 0 0
36293 - 0 0 0 0 0 0 0 0 0 0 0 0
36294 - 0 0 0 0 0 0 0 0 0 0 0 0
36295 - 0 0 0 0 0 0 0 0 0 0 0 0
36296 - 0 0 0 0 0 0 0 0 0 0 0 0
36297 - 0 0 0 0 0 0 0 0 0 14 14 14
36298 - 46 46 46 86 86 86 2 2 6 54 54 54
36299 -218 218 218 195 195 195 226 226 226 246 246 246
36300 - 58 58 58 2 2 6 2 2 6 30 30 30
36301 -210 210 210 253 253 253 174 174 174 123 123 123
36302 -221 221 221 234 234 234 74 74 74 2 2 6
36303 - 2 2 6 2 2 6 2 2 6 2 2 6
36304 - 70 70 70 58 58 58 22 22 22 6 6 6
36305 - 0 0 0 0 0 0 0 0 0 0 0 0
36306 - 0 0 0 0 0 0 0 0 0 0 0 0
36307 - 0 0 0 0 0 0 0 0 0 0 0 0
36308 - 0 0 0 0 0 0 0 0 0 0 0 0
36309 - 0 0 0 0 0 0 0 0 0 0 0 0
36310 - 0 0 0 0 0 0 0 0 0 0 0 0
36311 - 0 0 0 0 0 0 0 0 0 0 0 0
36312 - 0 0 0 0 0 0 0 0 0 0 0 0
36313 - 0 0 0 0 0 0 0 0 0 0 0 0
36314 - 0 0 0 0 0 0 0 0 0 0 0 0
36315 - 0 0 0 0 0 0 0 0 0 0 0 0
36316 - 0 0 0 0 0 0 0 0 0 0 0 0
36317 - 0 0 0 0 0 0 0 0 0 14 14 14
36318 - 46 46 46 82 82 82 2 2 6 106 106 106
36319 -170 170 170 26 26 26 86 86 86 226 226 226
36320 -123 123 123 10 10 10 14 14 14 46 46 46
36321 -231 231 231 190 190 190 6 6 6 70 70 70
36322 - 90 90 90 238 238 238 158 158 158 2 2 6
36323 - 2 2 6 2 2 6 2 2 6 2 2 6
36324 - 70 70 70 58 58 58 22 22 22 6 6 6
36325 - 0 0 0 0 0 0 0 0 0 0 0 0
36326 - 0 0 0 0 0 0 0 0 0 0 0 0
36327 - 0 0 0 0 0 0 0 0 0 0 0 0
36328 - 0 0 0 0 0 0 0 0 0 0 0 0
36329 - 0 0 0 0 0 0 0 0 0 0 0 0
36330 - 0 0 0 0 0 0 0 0 0 0 0 0
36331 - 0 0 0 0 0 0 0 0 1 0 0 0
36332 - 0 0 1 0 0 1 0 0 1 0 0 0
36333 - 0 0 0 0 0 0 0 0 0 0 0 0
36334 - 0 0 0 0 0 0 0 0 0 0 0 0
36335 - 0 0 0 0 0 0 0 0 0 0 0 0
36336 - 0 0 0 0 0 0 0 0 0 0 0 0
36337 - 0 0 0 0 0 0 0 0 0 14 14 14
36338 - 42 42 42 86 86 86 6 6 6 116 116 116
36339 -106 106 106 6 6 6 70 70 70 149 149 149
36340 -128 128 128 18 18 18 38 38 38 54 54 54
36341 -221 221 221 106 106 106 2 2 6 14 14 14
36342 - 46 46 46 190 190 190 198 198 198 2 2 6
36343 - 2 2 6 2 2 6 2 2 6 2 2 6
36344 - 74 74 74 62 62 62 22 22 22 6 6 6
36345 - 0 0 0 0 0 0 0 0 0 0 0 0
36346 - 0 0 0 0 0 0 0 0 0 0 0 0
36347 - 0 0 0 0 0 0 0 0 0 0 0 0
36348 - 0 0 0 0 0 0 0 0 0 0 0 0
36349 - 0 0 0 0 0 0 0 0 0 0 0 0
36350 - 0 0 0 0 0 0 0 0 0 0 0 0
36351 - 0 0 0 0 0 0 0 0 1 0 0 0
36352 - 0 0 1 0 0 0 0 0 1 0 0 0
36353 - 0 0 0 0 0 0 0 0 0 0 0 0
36354 - 0 0 0 0 0 0 0 0 0 0 0 0
36355 - 0 0 0 0 0 0 0 0 0 0 0 0
36356 - 0 0 0 0 0 0 0 0 0 0 0 0
36357 - 0 0 0 0 0 0 0 0 0 14 14 14
36358 - 42 42 42 94 94 94 14 14 14 101 101 101
36359 -128 128 128 2 2 6 18 18 18 116 116 116
36360 -118 98 46 121 92 8 121 92 8 98 78 10
36361 -162 162 162 106 106 106 2 2 6 2 2 6
36362 - 2 2 6 195 195 195 195 195 195 6 6 6
36363 - 2 2 6 2 2 6 2 2 6 2 2 6
36364 - 74 74 74 62 62 62 22 22 22 6 6 6
36365 - 0 0 0 0 0 0 0 0 0 0 0 0
36366 - 0 0 0 0 0 0 0 0 0 0 0 0
36367 - 0 0 0 0 0 0 0 0 0 0 0 0
36368 - 0 0 0 0 0 0 0 0 0 0 0 0
36369 - 0 0 0 0 0 0 0 0 0 0 0 0
36370 - 0 0 0 0 0 0 0 0 0 0 0 0
36371 - 0 0 0 0 0 0 0 0 1 0 0 1
36372 - 0 0 1 0 0 0 0 0 1 0 0 0
36373 - 0 0 0 0 0 0 0 0 0 0 0 0
36374 - 0 0 0 0 0 0 0 0 0 0 0 0
36375 - 0 0 0 0 0 0 0 0 0 0 0 0
36376 - 0 0 0 0 0 0 0 0 0 0 0 0
36377 - 0 0 0 0 0 0 0 0 0 10 10 10
36378 - 38 38 38 90 90 90 14 14 14 58 58 58
36379 -210 210 210 26 26 26 54 38 6 154 114 10
36380 -226 170 11 236 186 11 225 175 15 184 144 12
36381 -215 174 15 175 146 61 37 26 9 2 2 6
36382 - 70 70 70 246 246 246 138 138 138 2 2 6
36383 - 2 2 6 2 2 6 2 2 6 2 2 6
36384 - 70 70 70 66 66 66 26 26 26 6 6 6
36385 - 0 0 0 0 0 0 0 0 0 0 0 0
36386 - 0 0 0 0 0 0 0 0 0 0 0 0
36387 - 0 0 0 0 0 0 0 0 0 0 0 0
36388 - 0 0 0 0 0 0 0 0 0 0 0 0
36389 - 0 0 0 0 0 0 0 0 0 0 0 0
36390 - 0 0 0 0 0 0 0 0 0 0 0 0
36391 - 0 0 0 0 0 0 0 0 0 0 0 0
36392 - 0 0 0 0 0 0 0 0 0 0 0 0
36393 - 0 0 0 0 0 0 0 0 0 0 0 0
36394 - 0 0 0 0 0 0 0 0 0 0 0 0
36395 - 0 0 0 0 0 0 0 0 0 0 0 0
36396 - 0 0 0 0 0 0 0 0 0 0 0 0
36397 - 0 0 0 0 0 0 0 0 0 10 10 10
36398 - 38 38 38 86 86 86 14 14 14 10 10 10
36399 -195 195 195 188 164 115 192 133 9 225 175 15
36400 -239 182 13 234 190 10 232 195 16 232 200 30
36401 -245 207 45 241 208 19 232 195 16 184 144 12
36402 -218 194 134 211 206 186 42 42 42 2 2 6
36403 - 2 2 6 2 2 6 2 2 6 2 2 6
36404 - 50 50 50 74 74 74 30 30 30 6 6 6
36405 - 0 0 0 0 0 0 0 0 0 0 0 0
36406 - 0 0 0 0 0 0 0 0 0 0 0 0
36407 - 0 0 0 0 0 0 0 0 0 0 0 0
36408 - 0 0 0 0 0 0 0 0 0 0 0 0
36409 - 0 0 0 0 0 0 0 0 0 0 0 0
36410 - 0 0 0 0 0 0 0 0 0 0 0 0
36411 - 0 0 0 0 0 0 0 0 0 0 0 0
36412 - 0 0 0 0 0 0 0 0 0 0 0 0
36413 - 0 0 0 0 0 0 0 0 0 0 0 0
36414 - 0 0 0 0 0 0 0 0 0 0 0 0
36415 - 0 0 0 0 0 0 0 0 0 0 0 0
36416 - 0 0 0 0 0 0 0 0 0 0 0 0
36417 - 0 0 0 0 0 0 0 0 0 10 10 10
36418 - 34 34 34 86 86 86 14 14 14 2 2 6
36419 -121 87 25 192 133 9 219 162 10 239 182 13
36420 -236 186 11 232 195 16 241 208 19 244 214 54
36421 -246 218 60 246 218 38 246 215 20 241 208 19
36422 -241 208 19 226 184 13 121 87 25 2 2 6
36423 - 2 2 6 2 2 6 2 2 6 2 2 6
36424 - 50 50 50 82 82 82 34 34 34 10 10 10
36425 - 0 0 0 0 0 0 0 0 0 0 0 0
36426 - 0 0 0 0 0 0 0 0 0 0 0 0
36427 - 0 0 0 0 0 0 0 0 0 0 0 0
36428 - 0 0 0 0 0 0 0 0 0 0 0 0
36429 - 0 0 0 0 0 0 0 0 0 0 0 0
36430 - 0 0 0 0 0 0 0 0 0 0 0 0
36431 - 0 0 0 0 0 0 0 0 0 0 0 0
36432 - 0 0 0 0 0 0 0 0 0 0 0 0
36433 - 0 0 0 0 0 0 0 0 0 0 0 0
36434 - 0 0 0 0 0 0 0 0 0 0 0 0
36435 - 0 0 0 0 0 0 0 0 0 0 0 0
36436 - 0 0 0 0 0 0 0 0 0 0 0 0
36437 - 0 0 0 0 0 0 0 0 0 10 10 10
36438 - 34 34 34 82 82 82 30 30 30 61 42 6
36439 -180 123 7 206 145 10 230 174 11 239 182 13
36440 -234 190 10 238 202 15 241 208 19 246 218 74
36441 -246 218 38 246 215 20 246 215 20 246 215 20
36442 -226 184 13 215 174 15 184 144 12 6 6 6
36443 - 2 2 6 2 2 6 2 2 6 2 2 6
36444 - 26 26 26 94 94 94 42 42 42 14 14 14
36445 - 0 0 0 0 0 0 0 0 0 0 0 0
36446 - 0 0 0 0 0 0 0 0 0 0 0 0
36447 - 0 0 0 0 0 0 0 0 0 0 0 0
36448 - 0 0 0 0 0 0 0 0 0 0 0 0
36449 - 0 0 0 0 0 0 0 0 0 0 0 0
36450 - 0 0 0 0 0 0 0 0 0 0 0 0
36451 - 0 0 0 0 0 0 0 0 0 0 0 0
36452 - 0 0 0 0 0 0 0 0 0 0 0 0
36453 - 0 0 0 0 0 0 0 0 0 0 0 0
36454 - 0 0 0 0 0 0 0 0 0 0 0 0
36455 - 0 0 0 0 0 0 0 0 0 0 0 0
36456 - 0 0 0 0 0 0 0 0 0 0 0 0
36457 - 0 0 0 0 0 0 0 0 0 10 10 10
36458 - 30 30 30 78 78 78 50 50 50 104 69 6
36459 -192 133 9 216 158 10 236 178 12 236 186 11
36460 -232 195 16 241 208 19 244 214 54 245 215 43
36461 -246 215 20 246 215 20 241 208 19 198 155 10
36462 -200 144 11 216 158 10 156 118 10 2 2 6
36463 - 2 2 6 2 2 6 2 2 6 2 2 6
36464 - 6 6 6 90 90 90 54 54 54 18 18 18
36465 - 6 6 6 0 0 0 0 0 0 0 0 0
36466 - 0 0 0 0 0 0 0 0 0 0 0 0
36467 - 0 0 0 0 0 0 0 0 0 0 0 0
36468 - 0 0 0 0 0 0 0 0 0 0 0 0
36469 - 0 0 0 0 0 0 0 0 0 0 0 0
36470 - 0 0 0 0 0 0 0 0 0 0 0 0
36471 - 0 0 0 0 0 0 0 0 0 0 0 0
36472 - 0 0 0 0 0 0 0 0 0 0 0 0
36473 - 0 0 0 0 0 0 0 0 0 0 0 0
36474 - 0 0 0 0 0 0 0 0 0 0 0 0
36475 - 0 0 0 0 0 0 0 0 0 0 0 0
36476 - 0 0 0 0 0 0 0 0 0 0 0 0
36477 - 0 0 0 0 0 0 0 0 0 10 10 10
36478 - 30 30 30 78 78 78 46 46 46 22 22 22
36479 -137 92 6 210 162 10 239 182 13 238 190 10
36480 -238 202 15 241 208 19 246 215 20 246 215 20
36481 -241 208 19 203 166 17 185 133 11 210 150 10
36482 -216 158 10 210 150 10 102 78 10 2 2 6
36483 - 6 6 6 54 54 54 14 14 14 2 2 6
36484 - 2 2 6 62 62 62 74 74 74 30 30 30
36485 - 10 10 10 0 0 0 0 0 0 0 0 0
36486 - 0 0 0 0 0 0 0 0 0 0 0 0
36487 - 0 0 0 0 0 0 0 0 0 0 0 0
36488 - 0 0 0 0 0 0 0 0 0 0 0 0
36489 - 0 0 0 0 0 0 0 0 0 0 0 0
36490 - 0 0 0 0 0 0 0 0 0 0 0 0
36491 - 0 0 0 0 0 0 0 0 0 0 0 0
36492 - 0 0 0 0 0 0 0 0 0 0 0 0
36493 - 0 0 0 0 0 0 0 0 0 0 0 0
36494 - 0 0 0 0 0 0 0 0 0 0 0 0
36495 - 0 0 0 0 0 0 0 0 0 0 0 0
36496 - 0 0 0 0 0 0 0 0 0 0 0 0
36497 - 0 0 0 0 0 0 0 0 0 10 10 10
36498 - 34 34 34 78 78 78 50 50 50 6 6 6
36499 - 94 70 30 139 102 15 190 146 13 226 184 13
36500 -232 200 30 232 195 16 215 174 15 190 146 13
36501 -168 122 10 192 133 9 210 150 10 213 154 11
36502 -202 150 34 182 157 106 101 98 89 2 2 6
36503 - 2 2 6 78 78 78 116 116 116 58 58 58
36504 - 2 2 6 22 22 22 90 90 90 46 46 46
36505 - 18 18 18 6 6 6 0 0 0 0 0 0
36506 - 0 0 0 0 0 0 0 0 0 0 0 0
36507 - 0 0 0 0 0 0 0 0 0 0 0 0
36508 - 0 0 0 0 0 0 0 0 0 0 0 0
36509 - 0 0 0 0 0 0 0 0 0 0 0 0
36510 - 0 0 0 0 0 0 0 0 0 0 0 0
36511 - 0 0 0 0 0 0 0 0 0 0 0 0
36512 - 0 0 0 0 0 0 0 0 0 0 0 0
36513 - 0 0 0 0 0 0 0 0 0 0 0 0
36514 - 0 0 0 0 0 0 0 0 0 0 0 0
36515 - 0 0 0 0 0 0 0 0 0 0 0 0
36516 - 0 0 0 0 0 0 0 0 0 0 0 0
36517 - 0 0 0 0 0 0 0 0 0 10 10 10
36518 - 38 38 38 86 86 86 50 50 50 6 6 6
36519 -128 128 128 174 154 114 156 107 11 168 122 10
36520 -198 155 10 184 144 12 197 138 11 200 144 11
36521 -206 145 10 206 145 10 197 138 11 188 164 115
36522 -195 195 195 198 198 198 174 174 174 14 14 14
36523 - 2 2 6 22 22 22 116 116 116 116 116 116
36524 - 22 22 22 2 2 6 74 74 74 70 70 70
36525 - 30 30 30 10 10 10 0 0 0 0 0 0
36526 - 0 0 0 0 0 0 0 0 0 0 0 0
36527 - 0 0 0 0 0 0 0 0 0 0 0 0
36528 - 0 0 0 0 0 0 0 0 0 0 0 0
36529 - 0 0 0 0 0 0 0 0 0 0 0 0
36530 - 0 0 0 0 0 0 0 0 0 0 0 0
36531 - 0 0 0 0 0 0 0 0 0 0 0 0
36532 - 0 0 0 0 0 0 0 0 0 0 0 0
36533 - 0 0 0 0 0 0 0 0 0 0 0 0
36534 - 0 0 0 0 0 0 0 0 0 0 0 0
36535 - 0 0 0 0 0 0 0 0 0 0 0 0
36536 - 0 0 0 0 0 0 0 0 0 0 0 0
36537 - 0 0 0 0 0 0 6 6 6 18 18 18
36538 - 50 50 50 101 101 101 26 26 26 10 10 10
36539 -138 138 138 190 190 190 174 154 114 156 107 11
36540 -197 138 11 200 144 11 197 138 11 192 133 9
36541 -180 123 7 190 142 34 190 178 144 187 187 187
36542 -202 202 202 221 221 221 214 214 214 66 66 66
36543 - 2 2 6 2 2 6 50 50 50 62 62 62
36544 - 6 6 6 2 2 6 10 10 10 90 90 90
36545 - 50 50 50 18 18 18 6 6 6 0 0 0
36546 - 0 0 0 0 0 0 0 0 0 0 0 0
36547 - 0 0 0 0 0 0 0 0 0 0 0 0
36548 - 0 0 0 0 0 0 0 0 0 0 0 0
36549 - 0 0 0 0 0 0 0 0 0 0 0 0
36550 - 0 0 0 0 0 0 0 0 0 0 0 0
36551 - 0 0 0 0 0 0 0 0 0 0 0 0
36552 - 0 0 0 0 0 0 0 0 0 0 0 0
36553 - 0 0 0 0 0 0 0 0 0 0 0 0
36554 - 0 0 0 0 0 0 0 0 0 0 0 0
36555 - 0 0 0 0 0 0 0 0 0 0 0 0
36556 - 0 0 0 0 0 0 0 0 0 0 0 0
36557 - 0 0 0 0 0 0 10 10 10 34 34 34
36558 - 74 74 74 74 74 74 2 2 6 6 6 6
36559 -144 144 144 198 198 198 190 190 190 178 166 146
36560 -154 121 60 156 107 11 156 107 11 168 124 44
36561 -174 154 114 187 187 187 190 190 190 210 210 210
36562 -246 246 246 253 253 253 253 253 253 182 182 182
36563 - 6 6 6 2 2 6 2 2 6 2 2 6
36564 - 2 2 6 2 2 6 2 2 6 62 62 62
36565 - 74 74 74 34 34 34 14 14 14 0 0 0
36566 - 0 0 0 0 0 0 0 0 0 0 0 0
36567 - 0 0 0 0 0 0 0 0 0 0 0 0
36568 - 0 0 0 0 0 0 0 0 0 0 0 0
36569 - 0 0 0 0 0 0 0 0 0 0 0 0
36570 - 0 0 0 0 0 0 0 0 0 0 0 0
36571 - 0 0 0 0 0 0 0 0 0 0 0 0
36572 - 0 0 0 0 0 0 0 0 0 0 0 0
36573 - 0 0 0 0 0 0 0 0 0 0 0 0
36574 - 0 0 0 0 0 0 0 0 0 0 0 0
36575 - 0 0 0 0 0 0 0 0 0 0 0 0
36576 - 0 0 0 0 0 0 0 0 0 0 0 0
36577 - 0 0 0 10 10 10 22 22 22 54 54 54
36578 - 94 94 94 18 18 18 2 2 6 46 46 46
36579 -234 234 234 221 221 221 190 190 190 190 190 190
36580 -190 190 190 187 187 187 187 187 187 190 190 190
36581 -190 190 190 195 195 195 214 214 214 242 242 242
36582 -253 253 253 253 253 253 253 253 253 253 253 253
36583 - 82 82 82 2 2 6 2 2 6 2 2 6
36584 - 2 2 6 2 2 6 2 2 6 14 14 14
36585 - 86 86 86 54 54 54 22 22 22 6 6 6
36586 - 0 0 0 0 0 0 0 0 0 0 0 0
36587 - 0 0 0 0 0 0 0 0 0 0 0 0
36588 - 0 0 0 0 0 0 0 0 0 0 0 0
36589 - 0 0 0 0 0 0 0 0 0 0 0 0
36590 - 0 0 0 0 0 0 0 0 0 0 0 0
36591 - 0 0 0 0 0 0 0 0 0 0 0 0
36592 - 0 0 0 0 0 0 0 0 0 0 0 0
36593 - 0 0 0 0 0 0 0 0 0 0 0 0
36594 - 0 0 0 0 0 0 0 0 0 0 0 0
36595 - 0 0 0 0 0 0 0 0 0 0 0 0
36596 - 0 0 0 0 0 0 0 0 0 0 0 0
36597 - 6 6 6 18 18 18 46 46 46 90 90 90
36598 - 46 46 46 18 18 18 6 6 6 182 182 182
36599 -253 253 253 246 246 246 206 206 206 190 190 190
36600 -190 190 190 190 190 190 190 190 190 190 190 190
36601 -206 206 206 231 231 231 250 250 250 253 253 253
36602 -253 253 253 253 253 253 253 253 253 253 253 253
36603 -202 202 202 14 14 14 2 2 6 2 2 6
36604 - 2 2 6 2 2 6 2 2 6 2 2 6
36605 - 42 42 42 86 86 86 42 42 42 18 18 18
36606 - 6 6 6 0 0 0 0 0 0 0 0 0
36607 - 0 0 0 0 0 0 0 0 0 0 0 0
36608 - 0 0 0 0 0 0 0 0 0 0 0 0
36609 - 0 0 0 0 0 0 0 0 0 0 0 0
36610 - 0 0 0 0 0 0 0 0 0 0 0 0
36611 - 0 0 0 0 0 0 0 0 0 0 0 0
36612 - 0 0 0 0 0 0 0 0 0 0 0 0
36613 - 0 0 0 0 0 0 0 0 0 0 0 0
36614 - 0 0 0 0 0 0 0 0 0 0 0 0
36615 - 0 0 0 0 0 0 0 0 0 0 0 0
36616 - 0 0 0 0 0 0 0 0 0 6 6 6
36617 - 14 14 14 38 38 38 74 74 74 66 66 66
36618 - 2 2 6 6 6 6 90 90 90 250 250 250
36619 -253 253 253 253 253 253 238 238 238 198 198 198
36620 -190 190 190 190 190 190 195 195 195 221 221 221
36621 -246 246 246 253 253 253 253 253 253 253 253 253
36622 -253 253 253 253 253 253 253 253 253 253 253 253
36623 -253 253 253 82 82 82 2 2 6 2 2 6
36624 - 2 2 6 2 2 6 2 2 6 2 2 6
36625 - 2 2 6 78 78 78 70 70 70 34 34 34
36626 - 14 14 14 6 6 6 0 0 0 0 0 0
36627 - 0 0 0 0 0 0 0 0 0 0 0 0
36628 - 0 0 0 0 0 0 0 0 0 0 0 0
36629 - 0 0 0 0 0 0 0 0 0 0 0 0
36630 - 0 0 0 0 0 0 0 0 0 0 0 0
36631 - 0 0 0 0 0 0 0 0 0 0 0 0
36632 - 0 0 0 0 0 0 0 0 0 0 0 0
36633 - 0 0 0 0 0 0 0 0 0 0 0 0
36634 - 0 0 0 0 0 0 0 0 0 0 0 0
36635 - 0 0 0 0 0 0 0 0 0 0 0 0
36636 - 0 0 0 0 0 0 0 0 0 14 14 14
36637 - 34 34 34 66 66 66 78 78 78 6 6 6
36638 - 2 2 6 18 18 18 218 218 218 253 253 253
36639 -253 253 253 253 253 253 253 253 253 246 246 246
36640 -226 226 226 231 231 231 246 246 246 253 253 253
36641 -253 253 253 253 253 253 253 253 253 253 253 253
36642 -253 253 253 253 253 253 253 253 253 253 253 253
36643 -253 253 253 178 178 178 2 2 6 2 2 6
36644 - 2 2 6 2 2 6 2 2 6 2 2 6
36645 - 2 2 6 18 18 18 90 90 90 62 62 62
36646 - 30 30 30 10 10 10 0 0 0 0 0 0
36647 - 0 0 0 0 0 0 0 0 0 0 0 0
36648 - 0 0 0 0 0 0 0 0 0 0 0 0
36649 - 0 0 0 0 0 0 0 0 0 0 0 0
36650 - 0 0 0 0 0 0 0 0 0 0 0 0
36651 - 0 0 0 0 0 0 0 0 0 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 10 10 10 26 26 26
36657 - 58 58 58 90 90 90 18 18 18 2 2 6
36658 - 2 2 6 110 110 110 253 253 253 253 253 253
36659 -253 253 253 253 253 253 253 253 253 253 253 253
36660 -250 250 250 253 253 253 253 253 253 253 253 253
36661 -253 253 253 253 253 253 253 253 253 253 253 253
36662 -253 253 253 253 253 253 253 253 253 253 253 253
36663 -253 253 253 231 231 231 18 18 18 2 2 6
36664 - 2 2 6 2 2 6 2 2 6 2 2 6
36665 - 2 2 6 2 2 6 18 18 18 94 94 94
36666 - 54 54 54 26 26 26 10 10 10 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 0 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 6 6 6 22 22 22 50 50 50
36677 - 90 90 90 26 26 26 2 2 6 2 2 6
36678 - 14 14 14 195 195 195 250 250 250 253 253 253
36679 -253 253 253 253 253 253 253 253 253 253 253 253
36680 -253 253 253 253 253 253 253 253 253 253 253 253
36681 -253 253 253 253 253 253 253 253 253 253 253 253
36682 -253 253 253 253 253 253 253 253 253 253 253 253
36683 -250 250 250 242 242 242 54 54 54 2 2 6
36684 - 2 2 6 2 2 6 2 2 6 2 2 6
36685 - 2 2 6 2 2 6 2 2 6 38 38 38
36686 - 86 86 86 50 50 50 22 22 22 6 6 6
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 0 0 0 0 0 0 0 0 0
36690 - 0 0 0 0 0 0 0 0 0 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 6 6 6 14 14 14 38 38 38 82 82 82
36697 - 34 34 34 2 2 6 2 2 6 2 2 6
36698 - 42 42 42 195 195 195 246 246 246 253 253 253
36699 -253 253 253 253 253 253 253 253 253 250 250 250
36700 -242 242 242 242 242 242 250 250 250 253 253 253
36701 -253 253 253 253 253 253 253 253 253 253 253 253
36702 -253 253 253 250 250 250 246 246 246 238 238 238
36703 -226 226 226 231 231 231 101 101 101 6 6 6
36704 - 2 2 6 2 2 6 2 2 6 2 2 6
36705 - 2 2 6 2 2 6 2 2 6 2 2 6
36706 - 38 38 38 82 82 82 42 42 42 14 14 14
36707 - 6 6 6 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 0 0 0 0 0 0 0 0 0 0 0 0
36710 - 0 0 0 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 0 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 10 10 10 26 26 26 62 62 62 66 66 66
36717 - 2 2 6 2 2 6 2 2 6 6 6 6
36718 - 70 70 70 170 170 170 206 206 206 234 234 234
36719 -246 246 246 250 250 250 250 250 250 238 238 238
36720 -226 226 226 231 231 231 238 238 238 250 250 250
36721 -250 250 250 250 250 250 246 246 246 231 231 231
36722 -214 214 214 206 206 206 202 202 202 202 202 202
36723 -198 198 198 202 202 202 182 182 182 18 18 18
36724 - 2 2 6 2 2 6 2 2 6 2 2 6
36725 - 2 2 6 2 2 6 2 2 6 2 2 6
36726 - 2 2 6 62 62 62 66 66 66 30 30 30
36727 - 10 10 10 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 0 0 0
36729 - 0 0 0 0 0 0 0 0 0 0 0 0
36730 - 0 0 0 0 0 0 0 0 0 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 0 0 0 0 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 14 14 14 42 42 42 82 82 82 18 18 18
36737 - 2 2 6 2 2 6 2 2 6 10 10 10
36738 - 94 94 94 182 182 182 218 218 218 242 242 242
36739 -250 250 250 253 253 253 253 253 253 250 250 250
36740 -234 234 234 253 253 253 253 253 253 253 253 253
36741 -253 253 253 253 253 253 253 253 253 246 246 246
36742 -238 238 238 226 226 226 210 210 210 202 202 202
36743 -195 195 195 195 195 195 210 210 210 158 158 158
36744 - 6 6 6 14 14 14 50 50 50 14 14 14
36745 - 2 2 6 2 2 6 2 2 6 2 2 6
36746 - 2 2 6 6 6 6 86 86 86 46 46 46
36747 - 18 18 18 6 6 6 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 0 0 0 0 0 0
36749 - 0 0 0 0 0 0 0 0 0 0 0 0
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 6 6 6
36756 - 22 22 22 54 54 54 70 70 70 2 2 6
36757 - 2 2 6 10 10 10 2 2 6 22 22 22
36758 -166 166 166 231 231 231 250 250 250 253 253 253
36759 -253 253 253 253 253 253 253 253 253 250 250 250
36760 -242 242 242 253 253 253 253 253 253 253 253 253
36761 -253 253 253 253 253 253 253 253 253 253 253 253
36762 -253 253 253 253 253 253 253 253 253 246 246 246
36763 -231 231 231 206 206 206 198 198 198 226 226 226
36764 - 94 94 94 2 2 6 6 6 6 38 38 38
36765 - 30 30 30 2 2 6 2 2 6 2 2 6
36766 - 2 2 6 2 2 6 62 62 62 66 66 66
36767 - 26 26 26 10 10 10 0 0 0 0 0 0
36768 - 0 0 0 0 0 0 0 0 0 0 0 0
36769 - 0 0 0 0 0 0 0 0 0 0 0 0
36770 - 0 0 0 0 0 0 0 0 0 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 0 0 0 0 0 0 0 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 10 10 10
36776 - 30 30 30 74 74 74 50 50 50 2 2 6
36777 - 26 26 26 26 26 26 2 2 6 106 106 106
36778 -238 238 238 253 253 253 253 253 253 253 253 253
36779 -253 253 253 253 253 253 253 253 253 253 253 253
36780 -253 253 253 253 253 253 253 253 253 253 253 253
36781 -253 253 253 253 253 253 253 253 253 253 253 253
36782 -253 253 253 253 253 253 253 253 253 253 253 253
36783 -253 253 253 246 246 246 218 218 218 202 202 202
36784 -210 210 210 14 14 14 2 2 6 2 2 6
36785 - 30 30 30 22 22 22 2 2 6 2 2 6
36786 - 2 2 6 2 2 6 18 18 18 86 86 86
36787 - 42 42 42 14 14 14 0 0 0 0 0 0
36788 - 0 0 0 0 0 0 0 0 0 0 0 0
36789 - 0 0 0 0 0 0 0 0 0 0 0 0
36790 - 0 0 0 0 0 0 0 0 0 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 14 14 14
36796 - 42 42 42 90 90 90 22 22 22 2 2 6
36797 - 42 42 42 2 2 6 18 18 18 218 218 218
36798 -253 253 253 253 253 253 253 253 253 253 253 253
36799 -253 253 253 253 253 253 253 253 253 253 253 253
36800 -253 253 253 253 253 253 253 253 253 253 253 253
36801 -253 253 253 253 253 253 253 253 253 253 253 253
36802 -253 253 253 253 253 253 253 253 253 253 253 253
36803 -253 253 253 253 253 253 250 250 250 221 221 221
36804 -218 218 218 101 101 101 2 2 6 14 14 14
36805 - 18 18 18 38 38 38 10 10 10 2 2 6
36806 - 2 2 6 2 2 6 2 2 6 78 78 78
36807 - 58 58 58 22 22 22 6 6 6 0 0 0
36808 - 0 0 0 0 0 0 0 0 0 0 0 0
36809 - 0 0 0 0 0 0 0 0 0 0 0 0
36810 - 0 0 0 0 0 0 0 0 0 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 0 0 0 0 0 0 0 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 6 6 6 18 18 18
36816 - 54 54 54 82 82 82 2 2 6 26 26 26
36817 - 22 22 22 2 2 6 123 123 123 253 253 253
36818 -253 253 253 253 253 253 253 253 253 253 253 253
36819 -253 253 253 253 253 253 253 253 253 253 253 253
36820 -253 253 253 253 253 253 253 253 253 253 253 253
36821 -253 253 253 253 253 253 253 253 253 253 253 253
36822 -253 253 253 253 253 253 253 253 253 253 253 253
36823 -253 253 253 253 253 253 253 253 253 250 250 250
36824 -238 238 238 198 198 198 6 6 6 38 38 38
36825 - 58 58 58 26 26 26 38 38 38 2 2 6
36826 - 2 2 6 2 2 6 2 2 6 46 46 46
36827 - 78 78 78 30 30 30 10 10 10 0 0 0
36828 - 0 0 0 0 0 0 0 0 0 0 0 0
36829 - 0 0 0 0 0 0 0 0 0 0 0 0
36830 - 0 0 0 0 0 0 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 10 10 10 30 30 30
36836 - 74 74 74 58 58 58 2 2 6 42 42 42
36837 - 2 2 6 22 22 22 231 231 231 253 253 253
36838 -253 253 253 253 253 253 253 253 253 253 253 253
36839 -253 253 253 253 253 253 253 253 253 250 250 250
36840 -253 253 253 253 253 253 253 253 253 253 253 253
36841 -253 253 253 253 253 253 253 253 253 253 253 253
36842 -253 253 253 253 253 253 253 253 253 253 253 253
36843 -253 253 253 253 253 253 253 253 253 253 253 253
36844 -253 253 253 246 246 246 46 46 46 38 38 38
36845 - 42 42 42 14 14 14 38 38 38 14 14 14
36846 - 2 2 6 2 2 6 2 2 6 6 6 6
36847 - 86 86 86 46 46 46 14 14 14 0 0 0
36848 - 0 0 0 0 0 0 0 0 0 0 0 0
36849 - 0 0 0 0 0 0 0 0 0 0 0 0
36850 - 0 0 0 0 0 0 0 0 0 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 0 0 0 0 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 6 6 6 14 14 14 42 42 42
36856 - 90 90 90 18 18 18 18 18 18 26 26 26
36857 - 2 2 6 116 116 116 253 253 253 253 253 253
36858 -253 253 253 253 253 253 253 253 253 253 253 253
36859 -253 253 253 253 253 253 250 250 250 238 238 238
36860 -253 253 253 253 253 253 253 253 253 253 253 253
36861 -253 253 253 253 253 253 253 253 253 253 253 253
36862 -253 253 253 253 253 253 253 253 253 253 253 253
36863 -253 253 253 253 253 253 253 253 253 253 253 253
36864 -253 253 253 253 253 253 94 94 94 6 6 6
36865 - 2 2 6 2 2 6 10 10 10 34 34 34
36866 - 2 2 6 2 2 6 2 2 6 2 2 6
36867 - 74 74 74 58 58 58 22 22 22 6 6 6
36868 - 0 0 0 0 0 0 0 0 0 0 0 0
36869 - 0 0 0 0 0 0 0 0 0 0 0 0
36870 - 0 0 0 0 0 0 0 0 0 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 10 10 10 26 26 26 66 66 66
36876 - 82 82 82 2 2 6 38 38 38 6 6 6
36877 - 14 14 14 210 210 210 253 253 253 253 253 253
36878 -253 253 253 253 253 253 253 253 253 253 253 253
36879 -253 253 253 253 253 253 246 246 246 242 242 242
36880 -253 253 253 253 253 253 253 253 253 253 253 253
36881 -253 253 253 253 253 253 253 253 253 253 253 253
36882 -253 253 253 253 253 253 253 253 253 253 253 253
36883 -253 253 253 253 253 253 253 253 253 253 253 253
36884 -253 253 253 253 253 253 144 144 144 2 2 6
36885 - 2 2 6 2 2 6 2 2 6 46 46 46
36886 - 2 2 6 2 2 6 2 2 6 2 2 6
36887 - 42 42 42 74 74 74 30 30 30 10 10 10
36888 - 0 0 0 0 0 0 0 0 0 0 0 0
36889 - 0 0 0 0 0 0 0 0 0 0 0 0
36890 - 0 0 0 0 0 0 0 0 0 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 6 6 6 14 14 14 42 42 42 90 90 90
36896 - 26 26 26 6 6 6 42 42 42 2 2 6
36897 - 74 74 74 250 250 250 253 253 253 253 253 253
36898 -253 253 253 253 253 253 253 253 253 253 253 253
36899 -253 253 253 253 253 253 242 242 242 242 242 242
36900 -253 253 253 253 253 253 253 253 253 253 253 253
36901 -253 253 253 253 253 253 253 253 253 253 253 253
36902 -253 253 253 253 253 253 253 253 253 253 253 253
36903 -253 253 253 253 253 253 253 253 253 253 253 253
36904 -253 253 253 253 253 253 182 182 182 2 2 6
36905 - 2 2 6 2 2 6 2 2 6 46 46 46
36906 - 2 2 6 2 2 6 2 2 6 2 2 6
36907 - 10 10 10 86 86 86 38 38 38 10 10 10
36908 - 0 0 0 0 0 0 0 0 0 0 0 0
36909 - 0 0 0 0 0 0 0 0 0 0 0 0
36910 - 0 0 0 0 0 0 0 0 0 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 10 10 10 26 26 26 66 66 66 82 82 82
36916 - 2 2 6 22 22 22 18 18 18 2 2 6
36917 -149 149 149 253 253 253 253 253 253 253 253 253
36918 -253 253 253 253 253 253 253 253 253 253 253 253
36919 -253 253 253 253 253 253 234 234 234 242 242 242
36920 -253 253 253 253 253 253 253 253 253 253 253 253
36921 -253 253 253 253 253 253 253 253 253 253 253 253
36922 -253 253 253 253 253 253 253 253 253 253 253 253
36923 -253 253 253 253 253 253 253 253 253 253 253 253
36924 -253 253 253 253 253 253 206 206 206 2 2 6
36925 - 2 2 6 2 2 6 2 2 6 38 38 38
36926 - 2 2 6 2 2 6 2 2 6 2 2 6
36927 - 6 6 6 86 86 86 46 46 46 14 14 14
36928 - 0 0 0 0 0 0 0 0 0 0 0 0
36929 - 0 0 0 0 0 0 0 0 0 0 0 0
36930 - 0 0 0 0 0 0 0 0 0 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 6 6 6
36935 - 18 18 18 46 46 46 86 86 86 18 18 18
36936 - 2 2 6 34 34 34 10 10 10 6 6 6
36937 -210 210 210 253 253 253 253 253 253 253 253 253
36938 -253 253 253 253 253 253 253 253 253 253 253 253
36939 -253 253 253 253 253 253 234 234 234 242 242 242
36940 -253 253 253 253 253 253 253 253 253 253 253 253
36941 -253 253 253 253 253 253 253 253 253 253 253 253
36942 -253 253 253 253 253 253 253 253 253 253 253 253
36943 -253 253 253 253 253 253 253 253 253 253 253 253
36944 -253 253 253 253 253 253 221 221 221 6 6 6
36945 - 2 2 6 2 2 6 6 6 6 30 30 30
36946 - 2 2 6 2 2 6 2 2 6 2 2 6
36947 - 2 2 6 82 82 82 54 54 54 18 18 18
36948 - 6 6 6 0 0 0 0 0 0 0 0 0
36949 - 0 0 0 0 0 0 0 0 0 0 0 0
36950 - 0 0 0 0 0 0 0 0 0 0 0 0
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 10 10 10
36955 - 26 26 26 66 66 66 62 62 62 2 2 6
36956 - 2 2 6 38 38 38 10 10 10 26 26 26
36957 -238 238 238 253 253 253 253 253 253 253 253 253
36958 -253 253 253 253 253 253 253 253 253 253 253 253
36959 -253 253 253 253 253 253 231 231 231 238 238 238
36960 -253 253 253 253 253 253 253 253 253 253 253 253
36961 -253 253 253 253 253 253 253 253 253 253 253 253
36962 -253 253 253 253 253 253 253 253 253 253 253 253
36963 -253 253 253 253 253 253 253 253 253 253 253 253
36964 -253 253 253 253 253 253 231 231 231 6 6 6
36965 - 2 2 6 2 2 6 10 10 10 30 30 30
36966 - 2 2 6 2 2 6 2 2 6 2 2 6
36967 - 2 2 6 66 66 66 58 58 58 22 22 22
36968 - 6 6 6 0 0 0 0 0 0 0 0 0
36969 - 0 0 0 0 0 0 0 0 0 0 0 0
36970 - 0 0 0 0 0 0 0 0 0 0 0 0
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 0 0 0 0
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 10 10 10
36975 - 38 38 38 78 78 78 6 6 6 2 2 6
36976 - 2 2 6 46 46 46 14 14 14 42 42 42
36977 -246 246 246 253 253 253 253 253 253 253 253 253
36978 -253 253 253 253 253 253 253 253 253 253 253 253
36979 -253 253 253 253 253 253 231 231 231 242 242 242
36980 -253 253 253 253 253 253 253 253 253 253 253 253
36981 -253 253 253 253 253 253 253 253 253 253 253 253
36982 -253 253 253 253 253 253 253 253 253 253 253 253
36983 -253 253 253 253 253 253 253 253 253 253 253 253
36984 -253 253 253 253 253 253 234 234 234 10 10 10
36985 - 2 2 6 2 2 6 22 22 22 14 14 14
36986 - 2 2 6 2 2 6 2 2 6 2 2 6
36987 - 2 2 6 66 66 66 62 62 62 22 22 22
36988 - 6 6 6 0 0 0 0 0 0 0 0 0
36989 - 0 0 0 0 0 0 0 0 0 0 0 0
36990 - 0 0 0 0 0 0 0 0 0 0 0 0
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 6 6 6 18 18 18
36995 - 50 50 50 74 74 74 2 2 6 2 2 6
36996 - 14 14 14 70 70 70 34 34 34 62 62 62
36997 -250 250 250 253 253 253 253 253 253 253 253 253
36998 -253 253 253 253 253 253 253 253 253 253 253 253
36999 -253 253 253 253 253 253 231 231 231 246 246 246
37000 -253 253 253 253 253 253 253 253 253 253 253 253
37001 -253 253 253 253 253 253 253 253 253 253 253 253
37002 -253 253 253 253 253 253 253 253 253 253 253 253
37003 -253 253 253 253 253 253 253 253 253 253 253 253
37004 -253 253 253 253 253 253 234 234 234 14 14 14
37005 - 2 2 6 2 2 6 30 30 30 2 2 6
37006 - 2 2 6 2 2 6 2 2 6 2 2 6
37007 - 2 2 6 66 66 66 62 62 62 22 22 22
37008 - 6 6 6 0 0 0 0 0 0 0 0 0
37009 - 0 0 0 0 0 0 0 0 0 0 0 0
37010 - 0 0 0 0 0 0 0 0 0 0 0 0
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 0 0 0 0
37013 - 0 0 0 0 0 0 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 6 6 6 18 18 18
37015 - 54 54 54 62 62 62 2 2 6 2 2 6
37016 - 2 2 6 30 30 30 46 46 46 70 70 70
37017 -250 250 250 253 253 253 253 253 253 253 253 253
37018 -253 253 253 253 253 253 253 253 253 253 253 253
37019 -253 253 253 253 253 253 231 231 231 246 246 246
37020 -253 253 253 253 253 253 253 253 253 253 253 253
37021 -253 253 253 253 253 253 253 253 253 253 253 253
37022 -253 253 253 253 253 253 253 253 253 253 253 253
37023 -253 253 253 253 253 253 253 253 253 253 253 253
37024 -253 253 253 253 253 253 226 226 226 10 10 10
37025 - 2 2 6 6 6 6 30 30 30 2 2 6
37026 - 2 2 6 2 2 6 2 2 6 2 2 6
37027 - 2 2 6 66 66 66 58 58 58 22 22 22
37028 - 6 6 6 0 0 0 0 0 0 0 0 0
37029 - 0 0 0 0 0 0 0 0 0 0 0 0
37030 - 0 0 0 0 0 0 0 0 0 0 0 0
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 0 0 0 0
37033 - 0 0 0 0 0 0 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 6 6 6 22 22 22
37035 - 58 58 58 62 62 62 2 2 6 2 2 6
37036 - 2 2 6 2 2 6 30 30 30 78 78 78
37037 -250 250 250 253 253 253 253 253 253 253 253 253
37038 -253 253 253 253 253 253 253 253 253 253 253 253
37039 -253 253 253 253 253 253 231 231 231 246 246 246
37040 -253 253 253 253 253 253 253 253 253 253 253 253
37041 -253 253 253 253 253 253 253 253 253 253 253 253
37042 -253 253 253 253 253 253 253 253 253 253 253 253
37043 -253 253 253 253 253 253 253 253 253 253 253 253
37044 -253 253 253 253 253 253 206 206 206 2 2 6
37045 - 22 22 22 34 34 34 18 14 6 22 22 22
37046 - 26 26 26 18 18 18 6 6 6 2 2 6
37047 - 2 2 6 82 82 82 54 54 54 18 18 18
37048 - 6 6 6 0 0 0 0 0 0 0 0 0
37049 - 0 0 0 0 0 0 0 0 0 0 0 0
37050 - 0 0 0 0 0 0 0 0 0 0 0 0
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 0 0 0 0
37053 - 0 0 0 0 0 0 0 0 0 0 0 0
37054 - 0 0 0 0 0 0 6 6 6 26 26 26
37055 - 62 62 62 106 106 106 74 54 14 185 133 11
37056 -210 162 10 121 92 8 6 6 6 62 62 62
37057 -238 238 238 253 253 253 253 253 253 253 253 253
37058 -253 253 253 253 253 253 253 253 253 253 253 253
37059 -253 253 253 253 253 253 231 231 231 246 246 246
37060 -253 253 253 253 253 253 253 253 253 253 253 253
37061 -253 253 253 253 253 253 253 253 253 253 253 253
37062 -253 253 253 253 253 253 253 253 253 253 253 253
37063 -253 253 253 253 253 253 253 253 253 253 253 253
37064 -253 253 253 253 253 253 158 158 158 18 18 18
37065 - 14 14 14 2 2 6 2 2 6 2 2 6
37066 - 6 6 6 18 18 18 66 66 66 38 38 38
37067 - 6 6 6 94 94 94 50 50 50 18 18 18
37068 - 6 6 6 0 0 0 0 0 0 0 0 0
37069 - 0 0 0 0 0 0 0 0 0 0 0 0
37070 - 0 0 0 0 0 0 0 0 0 0 0 0
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 6 6 6
37074 - 10 10 10 10 10 10 18 18 18 38 38 38
37075 - 78 78 78 142 134 106 216 158 10 242 186 14
37076 -246 190 14 246 190 14 156 118 10 10 10 10
37077 - 90 90 90 238 238 238 253 253 253 253 253 253
37078 -253 253 253 253 253 253 253 253 253 253 253 253
37079 -253 253 253 253 253 253 231 231 231 250 250 250
37080 -253 253 253 253 253 253 253 253 253 253 253 253
37081 -253 253 253 253 253 253 253 253 253 253 253 253
37082 -253 253 253 253 253 253 253 253 253 253 253 253
37083 -253 253 253 253 253 253 253 253 253 246 230 190
37084 -238 204 91 238 204 91 181 142 44 37 26 9
37085 - 2 2 6 2 2 6 2 2 6 2 2 6
37086 - 2 2 6 2 2 6 38 38 38 46 46 46
37087 - 26 26 26 106 106 106 54 54 54 18 18 18
37088 - 6 6 6 0 0 0 0 0 0 0 0 0
37089 - 0 0 0 0 0 0 0 0 0 0 0 0
37090 - 0 0 0 0 0 0 0 0 0 0 0 0
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 6 6 6 14 14 14 22 22 22
37094 - 30 30 30 38 38 38 50 50 50 70 70 70
37095 -106 106 106 190 142 34 226 170 11 242 186 14
37096 -246 190 14 246 190 14 246 190 14 154 114 10
37097 - 6 6 6 74 74 74 226 226 226 253 253 253
37098 -253 253 253 253 253 253 253 253 253 253 253 253
37099 -253 253 253 253 253 253 231 231 231 250 250 250
37100 -253 253 253 253 253 253 253 253 253 253 253 253
37101 -253 253 253 253 253 253 253 253 253 253 253 253
37102 -253 253 253 253 253 253 253 253 253 253 253 253
37103 -253 253 253 253 253 253 253 253 253 228 184 62
37104 -241 196 14 241 208 19 232 195 16 38 30 10
37105 - 2 2 6 2 2 6 2 2 6 2 2 6
37106 - 2 2 6 6 6 6 30 30 30 26 26 26
37107 -203 166 17 154 142 90 66 66 66 26 26 26
37108 - 6 6 6 0 0 0 0 0 0 0 0 0
37109 - 0 0 0 0 0 0 0 0 0 0 0 0
37110 - 0 0 0 0 0 0 0 0 0 0 0 0
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 6 6 6 18 18 18 38 38 38 58 58 58
37114 - 78 78 78 86 86 86 101 101 101 123 123 123
37115 -175 146 61 210 150 10 234 174 13 246 186 14
37116 -246 190 14 246 190 14 246 190 14 238 190 10
37117 -102 78 10 2 2 6 46 46 46 198 198 198
37118 -253 253 253 253 253 253 253 253 253 253 253 253
37119 -253 253 253 253 253 253 234 234 234 242 242 242
37120 -253 253 253 253 253 253 253 253 253 253 253 253
37121 -253 253 253 253 253 253 253 253 253 253 253 253
37122 -253 253 253 253 253 253 253 253 253 253 253 253
37123 -253 253 253 253 253 253 253 253 253 224 178 62
37124 -242 186 14 241 196 14 210 166 10 22 18 6
37125 - 2 2 6 2 2 6 2 2 6 2 2 6
37126 - 2 2 6 2 2 6 6 6 6 121 92 8
37127 -238 202 15 232 195 16 82 82 82 34 34 34
37128 - 10 10 10 0 0 0 0 0 0 0 0 0
37129 - 0 0 0 0 0 0 0 0 0 0 0 0
37130 - 0 0 0 0 0 0 0 0 0 0 0 0
37131 - 0 0 0 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 14 14 14 38 38 38 70 70 70 154 122 46
37134 -190 142 34 200 144 11 197 138 11 197 138 11
37135 -213 154 11 226 170 11 242 186 14 246 190 14
37136 -246 190 14 246 190 14 246 190 14 246 190 14
37137 -225 175 15 46 32 6 2 2 6 22 22 22
37138 -158 158 158 250 250 250 253 253 253 253 253 253
37139 -253 253 253 253 253 253 253 253 253 253 253 253
37140 -253 253 253 253 253 253 253 253 253 253 253 253
37141 -253 253 253 253 253 253 253 253 253 253 253 253
37142 -253 253 253 253 253 253 253 253 253 253 253 253
37143 -253 253 253 250 250 250 242 242 242 224 178 62
37144 -239 182 13 236 186 11 213 154 11 46 32 6
37145 - 2 2 6 2 2 6 2 2 6 2 2 6
37146 - 2 2 6 2 2 6 61 42 6 225 175 15
37147 -238 190 10 236 186 11 112 100 78 42 42 42
37148 - 14 14 14 0 0 0 0 0 0 0 0 0
37149 - 0 0 0 0 0 0 0 0 0 0 0 0
37150 - 0 0 0 0 0 0 0 0 0 0 0 0
37151 - 0 0 0 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 6 6 6
37153 - 22 22 22 54 54 54 154 122 46 213 154 11
37154 -226 170 11 230 174 11 226 170 11 226 170 11
37155 -236 178 12 242 186 14 246 190 14 246 190 14
37156 -246 190 14 246 190 14 246 190 14 246 190 14
37157 -241 196 14 184 144 12 10 10 10 2 2 6
37158 - 6 6 6 116 116 116 242 242 242 253 253 253
37159 -253 253 253 253 253 253 253 253 253 253 253 253
37160 -253 253 253 253 253 253 253 253 253 253 253 253
37161 -253 253 253 253 253 253 253 253 253 253 253 253
37162 -253 253 253 253 253 253 253 253 253 253 253 253
37163 -253 253 253 231 231 231 198 198 198 214 170 54
37164 -236 178 12 236 178 12 210 150 10 137 92 6
37165 - 18 14 6 2 2 6 2 2 6 2 2 6
37166 - 6 6 6 70 47 6 200 144 11 236 178 12
37167 -239 182 13 239 182 13 124 112 88 58 58 58
37168 - 22 22 22 6 6 6 0 0 0 0 0 0
37169 - 0 0 0 0 0 0 0 0 0 0 0 0
37170 - 0 0 0 0 0 0 0 0 0 0 0 0
37171 - 0 0 0 0 0 0 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 10 10 10
37173 - 30 30 30 70 70 70 180 133 36 226 170 11
37174 -239 182 13 242 186 14 242 186 14 246 186 14
37175 -246 190 14 246 190 14 246 190 14 246 190 14
37176 -246 190 14 246 190 14 246 190 14 246 190 14
37177 -246 190 14 232 195 16 98 70 6 2 2 6
37178 - 2 2 6 2 2 6 66 66 66 221 221 221
37179 -253 253 253 253 253 253 253 253 253 253 253 253
37180 -253 253 253 253 253 253 253 253 253 253 253 253
37181 -253 253 253 253 253 253 253 253 253 253 253 253
37182 -253 253 253 253 253 253 253 253 253 253 253 253
37183 -253 253 253 206 206 206 198 198 198 214 166 58
37184 -230 174 11 230 174 11 216 158 10 192 133 9
37185 -163 110 8 116 81 8 102 78 10 116 81 8
37186 -167 114 7 197 138 11 226 170 11 239 182 13
37187 -242 186 14 242 186 14 162 146 94 78 78 78
37188 - 34 34 34 14 14 14 6 6 6 0 0 0
37189 - 0 0 0 0 0 0 0 0 0 0 0 0
37190 - 0 0 0 0 0 0 0 0 0 0 0 0
37191 - 0 0 0 0 0 0 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 6 6 6
37193 - 30 30 30 78 78 78 190 142 34 226 170 11
37194 -239 182 13 246 190 14 246 190 14 246 190 14
37195 -246 190 14 246 190 14 246 190 14 246 190 14
37196 -246 190 14 246 190 14 246 190 14 246 190 14
37197 -246 190 14 241 196 14 203 166 17 22 18 6
37198 - 2 2 6 2 2 6 2 2 6 38 38 38
37199 -218 218 218 253 253 253 253 253 253 253 253 253
37200 -253 253 253 253 253 253 253 253 253 253 253 253
37201 -253 253 253 253 253 253 253 253 253 253 253 253
37202 -253 253 253 253 253 253 253 253 253 253 253 253
37203 -250 250 250 206 206 206 198 198 198 202 162 69
37204 -226 170 11 236 178 12 224 166 10 210 150 10
37205 -200 144 11 197 138 11 192 133 9 197 138 11
37206 -210 150 10 226 170 11 242 186 14 246 190 14
37207 -246 190 14 246 186 14 225 175 15 124 112 88
37208 - 62 62 62 30 30 30 14 14 14 6 6 6
37209 - 0 0 0 0 0 0 0 0 0 0 0 0
37210 - 0 0 0 0 0 0 0 0 0 0 0 0
37211 - 0 0 0 0 0 0 0 0 0 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 10 10 10
37213 - 30 30 30 78 78 78 174 135 50 224 166 10
37214 -239 182 13 246 190 14 246 190 14 246 190 14
37215 -246 190 14 246 190 14 246 190 14 246 190 14
37216 -246 190 14 246 190 14 246 190 14 246 190 14
37217 -246 190 14 246 190 14 241 196 14 139 102 15
37218 - 2 2 6 2 2 6 2 2 6 2 2 6
37219 - 78 78 78 250 250 250 253 253 253 253 253 253
37220 -253 253 253 253 253 253 253 253 253 253 253 253
37221 -253 253 253 253 253 253 253 253 253 253 253 253
37222 -253 253 253 253 253 253 253 253 253 253 253 253
37223 -250 250 250 214 214 214 198 198 198 190 150 46
37224 -219 162 10 236 178 12 234 174 13 224 166 10
37225 -216 158 10 213 154 11 213 154 11 216 158 10
37226 -226 170 11 239 182 13 246 190 14 246 190 14
37227 -246 190 14 246 190 14 242 186 14 206 162 42
37228 -101 101 101 58 58 58 30 30 30 14 14 14
37229 - 6 6 6 0 0 0 0 0 0 0 0 0
37230 - 0 0 0 0 0 0 0 0 0 0 0 0
37231 - 0 0 0 0 0 0 0 0 0 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 10 10 10
37233 - 30 30 30 74 74 74 174 135 50 216 158 10
37234 -236 178 12 246 190 14 246 190 14 246 190 14
37235 -246 190 14 246 190 14 246 190 14 246 190 14
37236 -246 190 14 246 190 14 246 190 14 246 190 14
37237 -246 190 14 246 190 14 241 196 14 226 184 13
37238 - 61 42 6 2 2 6 2 2 6 2 2 6
37239 - 22 22 22 238 238 238 253 253 253 253 253 253
37240 -253 253 253 253 253 253 253 253 253 253 253 253
37241 -253 253 253 253 253 253 253 253 253 253 253 253
37242 -253 253 253 253 253 253 253 253 253 253 253 253
37243 -253 253 253 226 226 226 187 187 187 180 133 36
37244 -216 158 10 236 178 12 239 182 13 236 178 12
37245 -230 174 11 226 170 11 226 170 11 230 174 11
37246 -236 178 12 242 186 14 246 190 14 246 190 14
37247 -246 190 14 246 190 14 246 186 14 239 182 13
37248 -206 162 42 106 106 106 66 66 66 34 34 34
37249 - 14 14 14 6 6 6 0 0 0 0 0 0
37250 - 0 0 0 0 0 0 0 0 0 0 0 0
37251 - 0 0 0 0 0 0 0 0 0 0 0 0
37252 - 0 0 0 0 0 0 0 0 0 6 6 6
37253 - 26 26 26 70 70 70 163 133 67 213 154 11
37254 -236 178 12 246 190 14 246 190 14 246 190 14
37255 -246 190 14 246 190 14 246 190 14 246 190 14
37256 -246 190 14 246 190 14 246 190 14 246 190 14
37257 -246 190 14 246 190 14 246 190 14 241 196 14
37258 -190 146 13 18 14 6 2 2 6 2 2 6
37259 - 46 46 46 246 246 246 253 253 253 253 253 253
37260 -253 253 253 253 253 253 253 253 253 253 253 253
37261 -253 253 253 253 253 253 253 253 253 253 253 253
37262 -253 253 253 253 253 253 253 253 253 253 253 253
37263 -253 253 253 221 221 221 86 86 86 156 107 11
37264 -216 158 10 236 178 12 242 186 14 246 186 14
37265 -242 186 14 239 182 13 239 182 13 242 186 14
37266 -242 186 14 246 186 14 246 190 14 246 190 14
37267 -246 190 14 246 190 14 246 190 14 246 190 14
37268 -242 186 14 225 175 15 142 122 72 66 66 66
37269 - 30 30 30 10 10 10 0 0 0 0 0 0
37270 - 0 0 0 0 0 0 0 0 0 0 0 0
37271 - 0 0 0 0 0 0 0 0 0 0 0 0
37272 - 0 0 0 0 0 0 0 0 0 6 6 6
37273 - 26 26 26 70 70 70 163 133 67 210 150 10
37274 -236 178 12 246 190 14 246 190 14 246 190 14
37275 -246 190 14 246 190 14 246 190 14 246 190 14
37276 -246 190 14 246 190 14 246 190 14 246 190 14
37277 -246 190 14 246 190 14 246 190 14 246 190 14
37278 -232 195 16 121 92 8 34 34 34 106 106 106
37279 -221 221 221 253 253 253 253 253 253 253 253 253
37280 -253 253 253 253 253 253 253 253 253 253 253 253
37281 -253 253 253 253 253 253 253 253 253 253 253 253
37282 -253 253 253 253 253 253 253 253 253 253 253 253
37283 -242 242 242 82 82 82 18 14 6 163 110 8
37284 -216 158 10 236 178 12 242 186 14 246 190 14
37285 -246 190 14 246 190 14 246 190 14 246 190 14
37286 -246 190 14 246 190 14 246 190 14 246 190 14
37287 -246 190 14 246 190 14 246 190 14 246 190 14
37288 -246 190 14 246 190 14 242 186 14 163 133 67
37289 - 46 46 46 18 18 18 6 6 6 0 0 0
37290 - 0 0 0 0 0 0 0 0 0 0 0 0
37291 - 0 0 0 0 0 0 0 0 0 0 0 0
37292 - 0 0 0 0 0 0 0 0 0 10 10 10
37293 - 30 30 30 78 78 78 163 133 67 210 150 10
37294 -236 178 12 246 186 14 246 190 14 246 190 14
37295 -246 190 14 246 190 14 246 190 14 246 190 14
37296 -246 190 14 246 190 14 246 190 14 246 190 14
37297 -246 190 14 246 190 14 246 190 14 246 190 14
37298 -241 196 14 215 174 15 190 178 144 253 253 253
37299 -253 253 253 253 253 253 253 253 253 253 253 253
37300 -253 253 253 253 253 253 253 253 253 253 253 253
37301 -253 253 253 253 253 253 253 253 253 253 253 253
37302 -253 253 253 253 253 253 253 253 253 218 218 218
37303 - 58 58 58 2 2 6 22 18 6 167 114 7
37304 -216 158 10 236 178 12 246 186 14 246 190 14
37305 -246 190 14 246 190 14 246 190 14 246 190 14
37306 -246 190 14 246 190 14 246 190 14 246 190 14
37307 -246 190 14 246 190 14 246 190 14 246 190 14
37308 -246 190 14 246 186 14 242 186 14 190 150 46
37309 - 54 54 54 22 22 22 6 6 6 0 0 0
37310 - 0 0 0 0 0 0 0 0 0 0 0 0
37311 - 0 0 0 0 0 0 0 0 0 0 0 0
37312 - 0 0 0 0 0 0 0 0 0 14 14 14
37313 - 38 38 38 86 86 86 180 133 36 213 154 11
37314 -236 178 12 246 186 14 246 190 14 246 190 14
37315 -246 190 14 246 190 14 246 190 14 246 190 14
37316 -246 190 14 246 190 14 246 190 14 246 190 14
37317 -246 190 14 246 190 14 246 190 14 246 190 14
37318 -246 190 14 232 195 16 190 146 13 214 214 214
37319 -253 253 253 253 253 253 253 253 253 253 253 253
37320 -253 253 253 253 253 253 253 253 253 253 253 253
37321 -253 253 253 253 253 253 253 253 253 253 253 253
37322 -253 253 253 250 250 250 170 170 170 26 26 26
37323 - 2 2 6 2 2 6 37 26 9 163 110 8
37324 -219 162 10 239 182 13 246 186 14 246 190 14
37325 -246 190 14 246 190 14 246 190 14 246 190 14
37326 -246 190 14 246 190 14 246 190 14 246 190 14
37327 -246 190 14 246 190 14 246 190 14 246 190 14
37328 -246 186 14 236 178 12 224 166 10 142 122 72
37329 - 46 46 46 18 18 18 6 6 6 0 0 0
37330 - 0 0 0 0 0 0 0 0 0 0 0 0
37331 - 0 0 0 0 0 0 0 0 0 0 0 0
37332 - 0 0 0 0 0 0 6 6 6 18 18 18
37333 - 50 50 50 109 106 95 192 133 9 224 166 10
37334 -242 186 14 246 190 14 246 190 14 246 190 14
37335 -246 190 14 246 190 14 246 190 14 246 190 14
37336 -246 190 14 246 190 14 246 190 14 246 190 14
37337 -246 190 14 246 190 14 246 190 14 246 190 14
37338 -242 186 14 226 184 13 210 162 10 142 110 46
37339 -226 226 226 253 253 253 253 253 253 253 253 253
37340 -253 253 253 253 253 253 253 253 253 253 253 253
37341 -253 253 253 253 253 253 253 253 253 253 253 253
37342 -198 198 198 66 66 66 2 2 6 2 2 6
37343 - 2 2 6 2 2 6 50 34 6 156 107 11
37344 -219 162 10 239 182 13 246 186 14 246 190 14
37345 -246 190 14 246 190 14 246 190 14 246 190 14
37346 -246 190 14 246 190 14 246 190 14 246 190 14
37347 -246 190 14 246 190 14 246 190 14 242 186 14
37348 -234 174 13 213 154 11 154 122 46 66 66 66
37349 - 30 30 30 10 10 10 0 0 0 0 0 0
37350 - 0 0 0 0 0 0 0 0 0 0 0 0
37351 - 0 0 0 0 0 0 0 0 0 0 0 0
37352 - 0 0 0 0 0 0 6 6 6 22 22 22
37353 - 58 58 58 154 121 60 206 145 10 234 174 13
37354 -242 186 14 246 186 14 246 190 14 246 190 14
37355 -246 190 14 246 190 14 246 190 14 246 190 14
37356 -246 190 14 246 190 14 246 190 14 246 190 14
37357 -246 190 14 246 190 14 246 190 14 246 190 14
37358 -246 186 14 236 178 12 210 162 10 163 110 8
37359 - 61 42 6 138 138 138 218 218 218 250 250 250
37360 -253 253 253 253 253 253 253 253 253 250 250 250
37361 -242 242 242 210 210 210 144 144 144 66 66 66
37362 - 6 6 6 2 2 6 2 2 6 2 2 6
37363 - 2 2 6 2 2 6 61 42 6 163 110 8
37364 -216 158 10 236 178 12 246 190 14 246 190 14
37365 -246 190 14 246 190 14 246 190 14 246 190 14
37366 -246 190 14 246 190 14 246 190 14 246 190 14
37367 -246 190 14 239 182 13 230 174 11 216 158 10
37368 -190 142 34 124 112 88 70 70 70 38 38 38
37369 - 18 18 18 6 6 6 0 0 0 0 0 0
37370 - 0 0 0 0 0 0 0 0 0 0 0 0
37371 - 0 0 0 0 0 0 0 0 0 0 0 0
37372 - 0 0 0 0 0 0 6 6 6 22 22 22
37373 - 62 62 62 168 124 44 206 145 10 224 166 10
37374 -236 178 12 239 182 13 242 186 14 242 186 14
37375 -246 186 14 246 190 14 246 190 14 246 190 14
37376 -246 190 14 246 190 14 246 190 14 246 190 14
37377 -246 190 14 246 190 14 246 190 14 246 190 14
37378 -246 190 14 236 178 12 216 158 10 175 118 6
37379 - 80 54 7 2 2 6 6 6 6 30 30 30
37380 - 54 54 54 62 62 62 50 50 50 38 38 38
37381 - 14 14 14 2 2 6 2 2 6 2 2 6
37382 - 2 2 6 2 2 6 2 2 6 2 2 6
37383 - 2 2 6 6 6 6 80 54 7 167 114 7
37384 -213 154 11 236 178 12 246 190 14 246 190 14
37385 -246 190 14 246 190 14 246 190 14 246 190 14
37386 -246 190 14 242 186 14 239 182 13 239 182 13
37387 -230 174 11 210 150 10 174 135 50 124 112 88
37388 - 82 82 82 54 54 54 34 34 34 18 18 18
37389 - 6 6 6 0 0 0 0 0 0 0 0 0
37390 - 0 0 0 0 0 0 0 0 0 0 0 0
37391 - 0 0 0 0 0 0 0 0 0 0 0 0
37392 - 0 0 0 0 0 0 6 6 6 18 18 18
37393 - 50 50 50 158 118 36 192 133 9 200 144 11
37394 -216 158 10 219 162 10 224 166 10 226 170 11
37395 -230 174 11 236 178 12 239 182 13 239 182 13
37396 -242 186 14 246 186 14 246 190 14 246 190 14
37397 -246 190 14 246 190 14 246 190 14 246 190 14
37398 -246 186 14 230 174 11 210 150 10 163 110 8
37399 -104 69 6 10 10 10 2 2 6 2 2 6
37400 - 2 2 6 2 2 6 2 2 6 2 2 6
37401 - 2 2 6 2 2 6 2 2 6 2 2 6
37402 - 2 2 6 2 2 6 2 2 6 2 2 6
37403 - 2 2 6 6 6 6 91 60 6 167 114 7
37404 -206 145 10 230 174 11 242 186 14 246 190 14
37405 -246 190 14 246 190 14 246 186 14 242 186 14
37406 -239 182 13 230 174 11 224 166 10 213 154 11
37407 -180 133 36 124 112 88 86 86 86 58 58 58
37408 - 38 38 38 22 22 22 10 10 10 6 6 6
37409 - 0 0 0 0 0 0 0 0 0 0 0 0
37410 - 0 0 0 0 0 0 0 0 0 0 0 0
37411 - 0 0 0 0 0 0 0 0 0 0 0 0
37412 - 0 0 0 0 0 0 0 0 0 14 14 14
37413 - 34 34 34 70 70 70 138 110 50 158 118 36
37414 -167 114 7 180 123 7 192 133 9 197 138 11
37415 -200 144 11 206 145 10 213 154 11 219 162 10
37416 -224 166 10 230 174 11 239 182 13 242 186 14
37417 -246 186 14 246 186 14 246 186 14 246 186 14
37418 -239 182 13 216 158 10 185 133 11 152 99 6
37419 -104 69 6 18 14 6 2 2 6 2 2 6
37420 - 2 2 6 2 2 6 2 2 6 2 2 6
37421 - 2 2 6 2 2 6 2 2 6 2 2 6
37422 - 2 2 6 2 2 6 2 2 6 2 2 6
37423 - 2 2 6 6 6 6 80 54 7 152 99 6
37424 -192 133 9 219 162 10 236 178 12 239 182 13
37425 -246 186 14 242 186 14 239 182 13 236 178 12
37426 -224 166 10 206 145 10 192 133 9 154 121 60
37427 - 94 94 94 62 62 62 42 42 42 22 22 22
37428 - 14 14 14 6 6 6 0 0 0 0 0 0
37429 - 0 0 0 0 0 0 0 0 0 0 0 0
37430 - 0 0 0 0 0 0 0 0 0 0 0 0
37431 - 0 0 0 0 0 0 0 0 0 0 0 0
37432 - 0 0 0 0 0 0 0 0 0 6 6 6
37433 - 18 18 18 34 34 34 58 58 58 78 78 78
37434 -101 98 89 124 112 88 142 110 46 156 107 11
37435 -163 110 8 167 114 7 175 118 6 180 123 7
37436 -185 133 11 197 138 11 210 150 10 219 162 10
37437 -226 170 11 236 178 12 236 178 12 234 174 13
37438 -219 162 10 197 138 11 163 110 8 130 83 6
37439 - 91 60 6 10 10 10 2 2 6 2 2 6
37440 - 18 18 18 38 38 38 38 38 38 38 38 38
37441 - 38 38 38 38 38 38 38 38 38 38 38 38
37442 - 38 38 38 38 38 38 26 26 26 2 2 6
37443 - 2 2 6 6 6 6 70 47 6 137 92 6
37444 -175 118 6 200 144 11 219 162 10 230 174 11
37445 -234 174 13 230 174 11 219 162 10 210 150 10
37446 -192 133 9 163 110 8 124 112 88 82 82 82
37447 - 50 50 50 30 30 30 14 14 14 6 6 6
37448 - 0 0 0 0 0 0 0 0 0 0 0 0
37449 - 0 0 0 0 0 0 0 0 0 0 0 0
37450 - 0 0 0 0 0 0 0 0 0 0 0 0
37451 - 0 0 0 0 0 0 0 0 0 0 0 0
37452 - 0 0 0 0 0 0 0 0 0 0 0 0
37453 - 6 6 6 14 14 14 22 22 22 34 34 34
37454 - 42 42 42 58 58 58 74 74 74 86 86 86
37455 -101 98 89 122 102 70 130 98 46 121 87 25
37456 -137 92 6 152 99 6 163 110 8 180 123 7
37457 -185 133 11 197 138 11 206 145 10 200 144 11
37458 -180 123 7 156 107 11 130 83 6 104 69 6
37459 - 50 34 6 54 54 54 110 110 110 101 98 89
37460 - 86 86 86 82 82 82 78 78 78 78 78 78
37461 - 78 78 78 78 78 78 78 78 78 78 78 78
37462 - 78 78 78 82 82 82 86 86 86 94 94 94
37463 -106 106 106 101 101 101 86 66 34 124 80 6
37464 -156 107 11 180 123 7 192 133 9 200 144 11
37465 -206 145 10 200 144 11 192 133 9 175 118 6
37466 -139 102 15 109 106 95 70 70 70 42 42 42
37467 - 22 22 22 10 10 10 0 0 0 0 0 0
37468 - 0 0 0 0 0 0 0 0 0 0 0 0
37469 - 0 0 0 0 0 0 0 0 0 0 0 0
37470 - 0 0 0 0 0 0 0 0 0 0 0 0
37471 - 0 0 0 0 0 0 0 0 0 0 0 0
37472 - 0 0 0 0 0 0 0 0 0 0 0 0
37473 - 0 0 0 0 0 0 6 6 6 10 10 10
37474 - 14 14 14 22 22 22 30 30 30 38 38 38
37475 - 50 50 50 62 62 62 74 74 74 90 90 90
37476 -101 98 89 112 100 78 121 87 25 124 80 6
37477 -137 92 6 152 99 6 152 99 6 152 99 6
37478 -138 86 6 124 80 6 98 70 6 86 66 30
37479 -101 98 89 82 82 82 58 58 58 46 46 46
37480 - 38 38 38 34 34 34 34 34 34 34 34 34
37481 - 34 34 34 34 34 34 34 34 34 34 34 34
37482 - 34 34 34 34 34 34 38 38 38 42 42 42
37483 - 54 54 54 82 82 82 94 86 76 91 60 6
37484 -134 86 6 156 107 11 167 114 7 175 118 6
37485 -175 118 6 167 114 7 152 99 6 121 87 25
37486 -101 98 89 62 62 62 34 34 34 18 18 18
37487 - 6 6 6 0 0 0 0 0 0 0 0 0
37488 - 0 0 0 0 0 0 0 0 0 0 0 0
37489 - 0 0 0 0 0 0 0 0 0 0 0 0
37490 - 0 0 0 0 0 0 0 0 0 0 0 0
37491 - 0 0 0 0 0 0 0 0 0 0 0 0
37492 - 0 0 0 0 0 0 0 0 0 0 0 0
37493 - 0 0 0 0 0 0 0 0 0 0 0 0
37494 - 0 0 0 6 6 6 6 6 6 10 10 10
37495 - 18 18 18 22 22 22 30 30 30 42 42 42
37496 - 50 50 50 66 66 66 86 86 86 101 98 89
37497 -106 86 58 98 70 6 104 69 6 104 69 6
37498 -104 69 6 91 60 6 82 62 34 90 90 90
37499 - 62 62 62 38 38 38 22 22 22 14 14 14
37500 - 10 10 10 10 10 10 10 10 10 10 10 10
37501 - 10 10 10 10 10 10 6 6 6 10 10 10
37502 - 10 10 10 10 10 10 10 10 10 14 14 14
37503 - 22 22 22 42 42 42 70 70 70 89 81 66
37504 - 80 54 7 104 69 6 124 80 6 137 92 6
37505 -134 86 6 116 81 8 100 82 52 86 86 86
37506 - 58 58 58 30 30 30 14 14 14 6 6 6
37507 - 0 0 0 0 0 0 0 0 0 0 0 0
37508 - 0 0 0 0 0 0 0 0 0 0 0 0
37509 - 0 0 0 0 0 0 0 0 0 0 0 0
37510 - 0 0 0 0 0 0 0 0 0 0 0 0
37511 - 0 0 0 0 0 0 0 0 0 0 0 0
37512 - 0 0 0 0 0 0 0 0 0 0 0 0
37513 - 0 0 0 0 0 0 0 0 0 0 0 0
37514 - 0 0 0 0 0 0 0 0 0 0 0 0
37515 - 0 0 0 6 6 6 10 10 10 14 14 14
37516 - 18 18 18 26 26 26 38 38 38 54 54 54
37517 - 70 70 70 86 86 86 94 86 76 89 81 66
37518 - 89 81 66 86 86 86 74 74 74 50 50 50
37519 - 30 30 30 14 14 14 6 6 6 0 0 0
37520 - 0 0 0 0 0 0 0 0 0 0 0 0
37521 - 0 0 0 0 0 0 0 0 0 0 0 0
37522 - 0 0 0 0 0 0 0 0 0 0 0 0
37523 - 6 6 6 18 18 18 34 34 34 58 58 58
37524 - 82 82 82 89 81 66 89 81 66 89 81 66
37525 - 94 86 66 94 86 76 74 74 74 50 50 50
37526 - 26 26 26 14 14 14 6 6 6 0 0 0
37527 - 0 0 0 0 0 0 0 0 0 0 0 0
37528 - 0 0 0 0 0 0 0 0 0 0 0 0
37529 - 0 0 0 0 0 0 0 0 0 0 0 0
37530 - 0 0 0 0 0 0 0 0 0 0 0 0
37531 - 0 0 0 0 0 0 0 0 0 0 0 0
37532 - 0 0 0 0 0 0 0 0 0 0 0 0
37533 - 0 0 0 0 0 0 0 0 0 0 0 0
37534 - 0 0 0 0 0 0 0 0 0 0 0 0
37535 - 0 0 0 0 0 0 0 0 0 0 0 0
37536 - 6 6 6 6 6 6 14 14 14 18 18 18
37537 - 30 30 30 38 38 38 46 46 46 54 54 54
37538 - 50 50 50 42 42 42 30 30 30 18 18 18
37539 - 10 10 10 0 0 0 0 0 0 0 0 0
37540 - 0 0 0 0 0 0 0 0 0 0 0 0
37541 - 0 0 0 0 0 0 0 0 0 0 0 0
37542 - 0 0 0 0 0 0 0 0 0 0 0 0
37543 - 0 0 0 6 6 6 14 14 14 26 26 26
37544 - 38 38 38 50 50 50 58 58 58 58 58 58
37545 - 54 54 54 42 42 42 30 30 30 18 18 18
37546 - 10 10 10 0 0 0 0 0 0 0 0 0
37547 - 0 0 0 0 0 0 0 0 0 0 0 0
37548 - 0 0 0 0 0 0 0 0 0 0 0 0
37549 - 0 0 0 0 0 0 0 0 0 0 0 0
37550 - 0 0 0 0 0 0 0 0 0 0 0 0
37551 - 0 0 0 0 0 0 0 0 0 0 0 0
37552 - 0 0 0 0 0 0 0 0 0 0 0 0
37553 - 0 0 0 0 0 0 0 0 0 0 0 0
37554 - 0 0 0 0 0 0 0 0 0 0 0 0
37555 - 0 0 0 0 0 0 0 0 0 0 0 0
37556 - 0 0 0 0 0 0 0 0 0 6 6 6
37557 - 6 6 6 10 10 10 14 14 14 18 18 18
37558 - 18 18 18 14 14 14 10 10 10 6 6 6
37559 - 0 0 0 0 0 0 0 0 0 0 0 0
37560 - 0 0 0 0 0 0 0 0 0 0 0 0
37561 - 0 0 0 0 0 0 0 0 0 0 0 0
37562 - 0 0 0 0 0 0 0 0 0 0 0 0
37563 - 0 0 0 0 0 0 0 0 0 6 6 6
37564 - 14 14 14 18 18 18 22 22 22 22 22 22
37565 - 18 18 18 14 14 14 10 10 10 6 6 6
37566 - 0 0 0 0 0 0 0 0 0 0 0 0
37567 - 0 0 0 0 0 0 0 0 0 0 0 0
37568 - 0 0 0 0 0 0 0 0 0 0 0 0
37569 - 0 0 0 0 0 0 0 0 0 0 0 0
37570 - 0 0 0 0 0 0 0 0 0 0 0 0
37571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37584 +4 4 4 4 4 4
37585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
+ [ roughly 1,100 further rows of "R G B" pixel triplets: the remainder of the replacement boot-logo image data added by this hunk ]
38691 diff -urNp linux-3.0.7/drivers/video/udlfb.c linux-3.0.7/drivers/video/udlfb.c
38692 --- linux-3.0.7/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
38693 +++ linux-3.0.7/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
38694 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
38695 dlfb_urb_completion(urb);
38696
38697 error:
38698 - atomic_add(bytes_sent, &dev->bytes_sent);
38699 - atomic_add(bytes_identical, &dev->bytes_identical);
38700 - atomic_add(width*height*2, &dev->bytes_rendered);
38701 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38702 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38703 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
38704 end_cycles = get_cycles();
38705 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
38706 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38707 >> 10)), /* Kcycles */
38708 &dev->cpu_kcycles_used);
38709
38710 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
38711 dlfb_urb_completion(urb);
38712
38713 error:
38714 - atomic_add(bytes_sent, &dev->bytes_sent);
38715 - atomic_add(bytes_identical, &dev->bytes_identical);
38716 - atomic_add(bytes_rendered, &dev->bytes_rendered);
38717 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38718 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38719 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
38720 end_cycles = get_cycles();
38721 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
38722 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38723 >> 10)), /* Kcycles */
38724 &dev->cpu_kcycles_used);
38725 }
38726 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
38727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38728 struct dlfb_data *dev = fb_info->par;
38729 return snprintf(buf, PAGE_SIZE, "%u\n",
38730 - atomic_read(&dev->bytes_rendered));
38731 + atomic_read_unchecked(&dev->bytes_rendered));
38732 }
38733
38734 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
38735 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
38736 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38737 struct dlfb_data *dev = fb_info->par;
38738 return snprintf(buf, PAGE_SIZE, "%u\n",
38739 - atomic_read(&dev->bytes_identical));
38740 + atomic_read_unchecked(&dev->bytes_identical));
38741 }
38742
38743 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
38744 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
38745 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38746 struct dlfb_data *dev = fb_info->par;
38747 return snprintf(buf, PAGE_SIZE, "%u\n",
38748 - atomic_read(&dev->bytes_sent));
38749 + atomic_read_unchecked(&dev->bytes_sent));
38750 }
38751
38752 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
38753 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
38754 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38755 struct dlfb_data *dev = fb_info->par;
38756 return snprintf(buf, PAGE_SIZE, "%u\n",
38757 - atomic_read(&dev->cpu_kcycles_used));
38758 + atomic_read_unchecked(&dev->cpu_kcycles_used));
38759 }
38760
38761 static ssize_t edid_show(
38762 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
38763 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38764 struct dlfb_data *dev = fb_info->par;
38765
38766 - atomic_set(&dev->bytes_rendered, 0);
38767 - atomic_set(&dev->bytes_identical, 0);
38768 - atomic_set(&dev->bytes_sent, 0);
38769 - atomic_set(&dev->cpu_kcycles_used, 0);
38770 + atomic_set_unchecked(&dev->bytes_rendered, 0);
38771 + atomic_set_unchecked(&dev->bytes_identical, 0);
38772 + atomic_set_unchecked(&dev->bytes_sent, 0);
38773 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
38774
38775 return count;
38776 }
38777 diff -urNp linux-3.0.7/drivers/video/uvesafb.c linux-3.0.7/drivers/video/uvesafb.c
38778 --- linux-3.0.7/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
38779 +++ linux-3.0.7/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
38780 @@ -19,6 +19,7 @@
38781 #include <linux/io.h>
38782 #include <linux/mutex.h>
38783 #include <linux/slab.h>
38784 +#include <linux/moduleloader.h>
38785 #include <video/edid.h>
38786 #include <video/uvesafb.h>
38787 #ifdef CONFIG_X86
38788 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
38789 NULL,
38790 };
38791
38792 - return call_usermodehelper(v86d_path, argv, envp, 1);
38793 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38794 }
38795
38796 /*
38797 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
38798 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38799 par->pmi_setpal = par->ypan = 0;
38800 } else {
38801 +
38802 +#ifdef CONFIG_PAX_KERNEXEC
38803 +#ifdef CONFIG_MODULES
38804 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38805 +#endif
38806 + if (!par->pmi_code) {
38807 + par->pmi_setpal = par->ypan = 0;
38808 + return 0;
38809 + }
38810 +#endif
38811 +
38812 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38813 + task->t.regs.edi);
38814 +
38815 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38816 + pax_open_kernel();
38817 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38818 + pax_close_kernel();
38819 +
38820 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38821 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38822 +#else
38823 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38824 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38825 +#endif
38826 +
38827 printk(KERN_INFO "uvesafb: protected mode interface info at "
38828 "%04x:%04x\n",
38829 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38830 @@ -1821,6 +1844,11 @@ out:
38831 if (par->vbe_modes)
38832 kfree(par->vbe_modes);
38833
38834 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38835 + if (par->pmi_code)
38836 + module_free_exec(NULL, par->pmi_code);
38837 +#endif
38838 +
38839 framebuffer_release(info);
38840 return err;
38841 }
38842 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
38843 kfree(par->vbe_state_orig);
38844 if (par->vbe_state_saved)
38845 kfree(par->vbe_state_saved);
38846 +
38847 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38848 + if (par->pmi_code)
38849 + module_free_exec(NULL, par->pmi_code);
38850 +#endif
38851 +
38852 }
38853
38854 framebuffer_release(info);
38855 diff -urNp linux-3.0.7/drivers/video/vesafb.c linux-3.0.7/drivers/video/vesafb.c
38856 --- linux-3.0.7/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
38857 +++ linux-3.0.7/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
38858 @@ -9,6 +9,7 @@
38859 */
38860
38861 #include <linux/module.h>
38862 +#include <linux/moduleloader.h>
38863 #include <linux/kernel.h>
38864 #include <linux/errno.h>
38865 #include <linux/string.h>
38866 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
38867 static int vram_total __initdata; /* Set total amount of memory */
38868 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38869 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38870 -static void (*pmi_start)(void) __read_mostly;
38871 -static void (*pmi_pal) (void) __read_mostly;
38872 +static void (*pmi_start)(void) __read_only;
38873 +static void (*pmi_pal) (void) __read_only;
38874 static int depth __read_mostly;
38875 static int vga_compat __read_mostly;
38876 /* --------------------------------------------------------------------- */
38877 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38878 unsigned int size_vmode;
38879 unsigned int size_remap;
38880 unsigned int size_total;
38881 + void *pmi_code = NULL;
38882
38883 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38884 return -ENODEV;
38885 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38886 size_remap = size_total;
38887 vesafb_fix.smem_len = size_remap;
38888
38889 -#ifndef __i386__
38890 - screen_info.vesapm_seg = 0;
38891 -#endif
38892 -
38893 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38894 printk(KERN_WARNING
38895 "vesafb: cannot reserve video memory at 0x%lx\n",
38896 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
38897 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38898 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38899
38900 +#ifdef __i386__
38901 +
38902 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38903 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38904 + if (!pmi_code)
38905 +#elif !defined(CONFIG_PAX_KERNEXEC)
38906 + if (0)
38907 +#endif
38908 +
38909 +#endif
38910 + screen_info.vesapm_seg = 0;
38911 +
38912 if (screen_info.vesapm_seg) {
38913 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38914 - screen_info.vesapm_seg,screen_info.vesapm_off);
38915 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38916 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38917 }
38918
38919 if (screen_info.vesapm_seg < 0xc000)
38920 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
38921
38922 if (ypan || pmi_setpal) {
38923 unsigned short *pmi_base;
38924 +
38925 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38926 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38927 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38928 +
38929 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38930 + pax_open_kernel();
38931 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38932 +#else
38933 + pmi_code = pmi_base;
38934 +#endif
38935 +
38936 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38937 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38938 +
38939 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38940 + pmi_start = ktva_ktla(pmi_start);
38941 + pmi_pal = ktva_ktla(pmi_pal);
38942 + pax_close_kernel();
38943 +#endif
38944 +
38945 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38946 if (pmi_base[3]) {
38947 printk(KERN_INFO "vesafb: pmi: ports = ");
38948 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
38949 info->node, info->fix.id);
38950 return 0;
38951 err:
38952 +
38953 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38954 + module_free_exec(NULL, pmi_code);
38955 +#endif
38956 +
38957 if (info->screen_base)
38958 iounmap(info->screen_base);
38959 framebuffer_release(info);
38960 diff -urNp linux-3.0.7/drivers/video/via/via_clock.h linux-3.0.7/drivers/video/via/via_clock.h
38961 --- linux-3.0.7/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
38962 +++ linux-3.0.7/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
38963 @@ -56,7 +56,7 @@ struct via_clock {
38964
38965 void (*set_engine_pll_state)(u8 state);
38966 void (*set_engine_pll)(struct via_pll_config config);
38967 -};
38968 +} __no_const;
38969
38970
38971 static inline u32 get_pll_internal_frequency(u32 ref_freq,
38972 diff -urNp linux-3.0.7/drivers/virtio/virtio_balloon.c linux-3.0.7/drivers/virtio/virtio_balloon.c
38973 --- linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
38974 +++ linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
38975 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
38976 struct sysinfo i;
38977 int idx = 0;
38978
38979 + pax_track_stack();
38980 +
38981 all_vm_events(events);
38982 si_meminfo(&i);
38983
38984 diff -urNp linux-3.0.7/fs/9p/vfs_inode.c linux-3.0.7/fs/9p/vfs_inode.c
38985 --- linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:54:54.000000000 -0400
38986 +++ linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:55:28.000000000 -0400
38987 @@ -1264,7 +1264,7 @@ static void *v9fs_vfs_follow_link(struct
38988 void
38989 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38990 {
38991 - char *s = nd_get_link(nd);
38992 + const char *s = nd_get_link(nd);
38993
38994 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38995 IS_ERR(s) ? "<error>" : s);
38996 diff -urNp linux-3.0.7/fs/aio.c linux-3.0.7/fs/aio.c
38997 --- linux-3.0.7/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
38998 +++ linux-3.0.7/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
38999 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39000 size += sizeof(struct io_event) * nr_events;
39001 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39002
39003 - if (nr_pages < 0)
39004 + if (nr_pages <= 0)
39005 return -EINVAL;
39006
39007 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39008 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39009 struct aio_timeout to;
39010 int retry = 0;
39011
39012 + pax_track_stack();
39013 +
39014 /* needed to zero any padding within an entry (there shouldn't be
39015 * any, but C is fun!
39016 */
39017 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39018 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39019 {
39020 ssize_t ret;
39021 + struct iovec iovstack;
39022
39023 #ifdef CONFIG_COMPAT
39024 if (compat)
39025 ret = compat_rw_copy_check_uvector(type,
39026 (struct compat_iovec __user *)kiocb->ki_buf,
39027 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39028 + kiocb->ki_nbytes, 1, &iovstack,
39029 &kiocb->ki_iovec);
39030 else
39031 #endif
39032 ret = rw_copy_check_uvector(type,
39033 (struct iovec __user *)kiocb->ki_buf,
39034 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39035 + kiocb->ki_nbytes, 1, &iovstack,
39036 &kiocb->ki_iovec);
39037 if (ret < 0)
39038 goto out;
39039
39040 + if (kiocb->ki_iovec == &iovstack) {
39041 + kiocb->ki_inline_vec = iovstack;
39042 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39043 + }
39044 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39045 kiocb->ki_cur_seg = 0;
39046 /* ki_nbytes/left now reflect bytes instead of segs */
39047 diff -urNp linux-3.0.7/fs/attr.c linux-3.0.7/fs/attr.c
39048 --- linux-3.0.7/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
39049 +++ linux-3.0.7/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
39050 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39051 unsigned long limit;
39052
39053 limit = rlimit(RLIMIT_FSIZE);
39054 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39055 if (limit != RLIM_INFINITY && offset > limit)
39056 goto out_sig;
39057 if (offset > inode->i_sb->s_maxbytes)
39058 diff -urNp linux-3.0.7/fs/autofs4/waitq.c linux-3.0.7/fs/autofs4/waitq.c
39059 --- linux-3.0.7/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
39060 +++ linux-3.0.7/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
39061 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39062 {
39063 unsigned long sigpipe, flags;
39064 mm_segment_t fs;
39065 - const char *data = (const char *)addr;
39066 + const char __user *data = (const char __force_user *)addr;
39067 ssize_t wr = 0;
39068
39069 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39070 diff -urNp linux-3.0.7/fs/befs/linuxvfs.c linux-3.0.7/fs/befs/linuxvfs.c
39071 --- linux-3.0.7/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
39072 +++ linux-3.0.7/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
39073 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39074 {
39075 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39076 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39077 - char *link = nd_get_link(nd);
39078 + const char *link = nd_get_link(nd);
39079 if (!IS_ERR(link))
39080 kfree(link);
39081 }
39082 diff -urNp linux-3.0.7/fs/binfmt_aout.c linux-3.0.7/fs/binfmt_aout.c
39083 --- linux-3.0.7/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
39084 +++ linux-3.0.7/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
39085 @@ -16,6 +16,7 @@
39086 #include <linux/string.h>
39087 #include <linux/fs.h>
39088 #include <linux/file.h>
39089 +#include <linux/security.h>
39090 #include <linux/stat.h>
39091 #include <linux/fcntl.h>
39092 #include <linux/ptrace.h>
39093 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39094 #endif
39095 # define START_STACK(u) ((void __user *)u.start_stack)
39096
39097 + memset(&dump, 0, sizeof(dump));
39098 +
39099 fs = get_fs();
39100 set_fs(KERNEL_DS);
39101 has_dumped = 1;
39102 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39103
39104 /* If the size of the dump file exceeds the rlimit, then see what would happen
39105 if we wrote the stack, but not the data area. */
39106 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39107 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39108 dump.u_dsize = 0;
39109
39110 /* Make sure we have enough room to write the stack and data areas. */
39111 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39112 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39113 dump.u_ssize = 0;
39114
39115 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39116 rlim = rlimit(RLIMIT_DATA);
39117 if (rlim >= RLIM_INFINITY)
39118 rlim = ~0;
39119 +
39120 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39121 if (ex.a_data + ex.a_bss > rlim)
39122 return -ENOMEM;
39123
39124 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39125 install_exec_creds(bprm);
39126 current->flags &= ~PF_FORKNOEXEC;
39127
39128 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39129 + current->mm->pax_flags = 0UL;
39130 +#endif
39131 +
39132 +#ifdef CONFIG_PAX_PAGEEXEC
39133 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39134 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39135 +
39136 +#ifdef CONFIG_PAX_EMUTRAMP
39137 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39138 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39139 +#endif
39140 +
39141 +#ifdef CONFIG_PAX_MPROTECT
39142 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39143 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39144 +#endif
39145 +
39146 + }
39147 +#endif
39148 +
39149 if (N_MAGIC(ex) == OMAGIC) {
39150 unsigned long text_addr, map_size;
39151 loff_t pos;
39152 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39153
39154 down_write(&current->mm->mmap_sem);
39155 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39156 - PROT_READ | PROT_WRITE | PROT_EXEC,
39157 + PROT_READ | PROT_WRITE,
39158 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39159 fd_offset + ex.a_text);
39160 up_write(&current->mm->mmap_sem);
39161 diff -urNp linux-3.0.7/fs/binfmt_elf.c linux-3.0.7/fs/binfmt_elf.c
39162 --- linux-3.0.7/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39163 +++ linux-3.0.7/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
39164 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39165 #define elf_core_dump NULL
39166 #endif
39167
39168 +#ifdef CONFIG_PAX_MPROTECT
39169 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39170 +#endif
39171 +
39172 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39173 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39174 #else
39175 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39176 .load_binary = load_elf_binary,
39177 .load_shlib = load_elf_library,
39178 .core_dump = elf_core_dump,
39179 +
39180 +#ifdef CONFIG_PAX_MPROTECT
39181 + .handle_mprotect= elf_handle_mprotect,
39182 +#endif
39183 +
39184 .min_coredump = ELF_EXEC_PAGESIZE,
39185 };
39186
39187 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39188
39189 static int set_brk(unsigned long start, unsigned long end)
39190 {
39191 + unsigned long e = end;
39192 +
39193 start = ELF_PAGEALIGN(start);
39194 end = ELF_PAGEALIGN(end);
39195 if (end > start) {
39196 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39197 if (BAD_ADDR(addr))
39198 return addr;
39199 }
39200 - current->mm->start_brk = current->mm->brk = end;
39201 + current->mm->start_brk = current->mm->brk = e;
39202 return 0;
39203 }
39204
39205 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39206 elf_addr_t __user *u_rand_bytes;
39207 const char *k_platform = ELF_PLATFORM;
39208 const char *k_base_platform = ELF_BASE_PLATFORM;
39209 - unsigned char k_rand_bytes[16];
39210 + u32 k_rand_bytes[4];
39211 int items;
39212 elf_addr_t *elf_info;
39213 int ei_index = 0;
39214 const struct cred *cred = current_cred();
39215 struct vm_area_struct *vma;
39216 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39217 +
39218 + pax_track_stack();
39219
39220 /*
39221 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39222 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39223 * Generate 16 random bytes for userspace PRNG seeding.
39224 */
39225 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39226 - u_rand_bytes = (elf_addr_t __user *)
39227 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39228 + srandom32(k_rand_bytes[0] ^ random32());
39229 + srandom32(k_rand_bytes[1] ^ random32());
39230 + srandom32(k_rand_bytes[2] ^ random32());
39231 + srandom32(k_rand_bytes[3] ^ random32());
39232 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39233 + u_rand_bytes = (elf_addr_t __user *) p;
39234 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39235 return -EFAULT;
39236
39237 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39238 return -EFAULT;
39239 current->mm->env_end = p;
39240
39241 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39242 +
39243 /* Put the elf_info on the stack in the right place. */
39244 sp = (elf_addr_t __user *)envp + 1;
39245 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39246 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39247 return -EFAULT;
39248 return 0;
39249 }
39250 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39251 {
39252 struct elf_phdr *elf_phdata;
39253 struct elf_phdr *eppnt;
39254 - unsigned long load_addr = 0;
39255 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39256 int load_addr_set = 0;
39257 unsigned long last_bss = 0, elf_bss = 0;
39258 - unsigned long error = ~0UL;
39259 + unsigned long error = -EINVAL;
39260 unsigned long total_size;
39261 int retval, i, size;
39262
39263 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39264 goto out_close;
39265 }
39266
39267 +#ifdef CONFIG_PAX_SEGMEXEC
39268 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39269 + pax_task_size = SEGMEXEC_TASK_SIZE;
39270 +#endif
39271 +
39272 eppnt = elf_phdata;
39273 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39274 if (eppnt->p_type == PT_LOAD) {
39275 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39276 k = load_addr + eppnt->p_vaddr;
39277 if (BAD_ADDR(k) ||
39278 eppnt->p_filesz > eppnt->p_memsz ||
39279 - eppnt->p_memsz > TASK_SIZE ||
39280 - TASK_SIZE - eppnt->p_memsz < k) {
39281 + eppnt->p_memsz > pax_task_size ||
39282 + pax_task_size - eppnt->p_memsz < k) {
39283 error = -ENOMEM;
39284 goto out_close;
39285 }
39286 @@ -528,6 +553,193 @@ out:
39287 return error;
39288 }
39289
39290 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39291 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39292 +{
39293 + unsigned long pax_flags = 0UL;
39294 +
39295 +#ifdef CONFIG_PAX_PAGEEXEC
39296 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39297 + pax_flags |= MF_PAX_PAGEEXEC;
39298 +#endif
39299 +
39300 +#ifdef CONFIG_PAX_SEGMEXEC
39301 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39302 + pax_flags |= MF_PAX_SEGMEXEC;
39303 +#endif
39304 +
39305 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39306 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39307 + if ((__supported_pte_mask & _PAGE_NX))
39308 + pax_flags &= ~MF_PAX_SEGMEXEC;
39309 + else
39310 + pax_flags &= ~MF_PAX_PAGEEXEC;
39311 + }
39312 +#endif
39313 +
39314 +#ifdef CONFIG_PAX_EMUTRAMP
39315 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39316 + pax_flags |= MF_PAX_EMUTRAMP;
39317 +#endif
39318 +
39319 +#ifdef CONFIG_PAX_MPROTECT
39320 + if (elf_phdata->p_flags & PF_MPROTECT)
39321 + pax_flags |= MF_PAX_MPROTECT;
39322 +#endif
39323 +
39324 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39325 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39326 + pax_flags |= MF_PAX_RANDMMAP;
39327 +#endif
39328 +
39329 + return pax_flags;
39330 +}
39331 +#endif
39332 +
39333 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39334 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39335 +{
39336 + unsigned long pax_flags = 0UL;
39337 +
39338 +#ifdef CONFIG_PAX_PAGEEXEC
39339 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39340 + pax_flags |= MF_PAX_PAGEEXEC;
39341 +#endif
39342 +
39343 +#ifdef CONFIG_PAX_SEGMEXEC
39344 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39345 + pax_flags |= MF_PAX_SEGMEXEC;
39346 +#endif
39347 +
39348 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39349 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39350 + if ((__supported_pte_mask & _PAGE_NX))
39351 + pax_flags &= ~MF_PAX_SEGMEXEC;
39352 + else
39353 + pax_flags &= ~MF_PAX_PAGEEXEC;
39354 + }
39355 +#endif
39356 +
39357 +#ifdef CONFIG_PAX_EMUTRAMP
39358 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39359 + pax_flags |= MF_PAX_EMUTRAMP;
39360 +#endif
39361 +
39362 +#ifdef CONFIG_PAX_MPROTECT
39363 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39364 + pax_flags |= MF_PAX_MPROTECT;
39365 +#endif
39366 +
39367 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39368 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39369 + pax_flags |= MF_PAX_RANDMMAP;
39370 +#endif
39371 +
39372 + return pax_flags;
39373 +}
39374 +#endif
39375 +
39376 +#ifdef CONFIG_PAX_EI_PAX
39377 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39378 +{
39379 + unsigned long pax_flags = 0UL;
39380 +
39381 +#ifdef CONFIG_PAX_PAGEEXEC
39382 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39383 + pax_flags |= MF_PAX_PAGEEXEC;
39384 +#endif
39385 +
39386 +#ifdef CONFIG_PAX_SEGMEXEC
39387 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39388 + pax_flags |= MF_PAX_SEGMEXEC;
39389 +#endif
39390 +
39391 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39392 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39393 + if ((__supported_pte_mask & _PAGE_NX))
39394 + pax_flags &= ~MF_PAX_SEGMEXEC;
39395 + else
39396 + pax_flags &= ~MF_PAX_PAGEEXEC;
39397 + }
39398 +#endif
39399 +
39400 +#ifdef CONFIG_PAX_EMUTRAMP
39401 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39402 + pax_flags |= MF_PAX_EMUTRAMP;
39403 +#endif
39404 +
39405 +#ifdef CONFIG_PAX_MPROTECT
39406 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39407 + pax_flags |= MF_PAX_MPROTECT;
39408 +#endif
39409 +
39410 +#ifdef CONFIG_PAX_ASLR
39411 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39412 + pax_flags |= MF_PAX_RANDMMAP;
39413 +#endif
39414 +
39415 + return pax_flags;
39416 +}
39417 +#endif
39418 +
39419 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39420 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39421 +{
39422 + unsigned long pax_flags = 0UL;
39423 +
39424 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39425 + unsigned long i;
39426 + int found_flags = 0;
39427 +#endif
39428 +
39429 +#ifdef CONFIG_PAX_EI_PAX
39430 + pax_flags = pax_parse_ei_pax(elf_ex);
39431 +#endif
39432 +
39433 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39434 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39435 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39436 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39437 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39438 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39439 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39440 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39441 + return -EINVAL;
39442 +
39443 +#ifdef CONFIG_PAX_SOFTMODE
39444 + if (pax_softmode)
39445 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39446 + else
39447 +#endif
39448 +
39449 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39450 + found_flags = 1;
39451 + break;
39452 + }
39453 +#endif
39454 +
39455 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39456 + if (found_flags == 0) {
39457 + struct elf_phdr phdr;
39458 + memset(&phdr, 0, sizeof(phdr));
39459 + phdr.p_flags = PF_NOEMUTRAMP;
39460 +#ifdef CONFIG_PAX_SOFTMODE
39461 + if (pax_softmode)
39462 + pax_flags = pax_parse_softmode(&phdr);
39463 + else
39464 +#endif
39465 + pax_flags = pax_parse_hardmode(&phdr);
39466 + }
39467 +#endif
39468 +
39469 + if (0 > pax_check_flags(&pax_flags))
39470 + return -EINVAL;
39471 +
39472 + current->mm->pax_flags = pax_flags;
39473 + return 0;
39474 +}
39475 +#endif
39476 +
39477 /*
39478 * These are the functions used to load ELF style executables and shared
39479 * libraries. There is no binary dependent code anywhere else.
39480 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
39481 {
39482 unsigned int random_variable = 0;
39483
39484 +#ifdef CONFIG_PAX_RANDUSTACK
39485 + if (randomize_va_space)
39486 + return stack_top - current->mm->delta_stack;
39487 +#endif
39488 +
39489 if ((current->flags & PF_RANDOMIZE) &&
39490 !(current->personality & ADDR_NO_RANDOMIZE)) {
39491 random_variable = get_random_int() & STACK_RND_MASK;
39492 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
39493 unsigned long load_addr = 0, load_bias = 0;
39494 int load_addr_set = 0;
39495 char * elf_interpreter = NULL;
39496 - unsigned long error;
39497 + unsigned long error = 0;
39498 struct elf_phdr *elf_ppnt, *elf_phdata;
39499 unsigned long elf_bss, elf_brk;
39500 int retval, i;
39501 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
39502 unsigned long start_code, end_code, start_data, end_data;
39503 unsigned long reloc_func_desc __maybe_unused = 0;
39504 int executable_stack = EXSTACK_DEFAULT;
39505 - unsigned long def_flags = 0;
39506 struct {
39507 struct elfhdr elf_ex;
39508 struct elfhdr interp_elf_ex;
39509 } *loc;
39510 + unsigned long pax_task_size = TASK_SIZE;
39511
39512 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39513 if (!loc) {
39514 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
39515
39516 /* OK, This is the point of no return */
39517 current->flags &= ~PF_FORKNOEXEC;
39518 - current->mm->def_flags = def_flags;
39519 +
39520 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39521 + current->mm->pax_flags = 0UL;
39522 +#endif
39523 +
39524 +#ifdef CONFIG_PAX_DLRESOLVE
39525 + current->mm->call_dl_resolve = 0UL;
39526 +#endif
39527 +
39528 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39529 + current->mm->call_syscall = 0UL;
39530 +#endif
39531 +
39532 +#ifdef CONFIG_PAX_ASLR
39533 + current->mm->delta_mmap = 0UL;
39534 + current->mm->delta_stack = 0UL;
39535 +#endif
39536 +
39537 + current->mm->def_flags = 0;
39538 +
39539 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39540 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39541 + send_sig(SIGKILL, current, 0);
39542 + goto out_free_dentry;
39543 + }
39544 +#endif
39545 +
39546 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39547 + pax_set_initial_flags(bprm);
39548 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39549 + if (pax_set_initial_flags_func)
39550 + (pax_set_initial_flags_func)(bprm);
39551 +#endif
39552 +
39553 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39554 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
39555 + current->mm->context.user_cs_limit = PAGE_SIZE;
39556 + current->mm->def_flags |= VM_PAGEEXEC;
39557 + }
39558 +#endif
39559 +
39560 +#ifdef CONFIG_PAX_SEGMEXEC
39561 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39562 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39563 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39564 + pax_task_size = SEGMEXEC_TASK_SIZE;
39565 + current->mm->def_flags |= VM_NOHUGEPAGE;
39566 + }
39567 +#endif
39568 +
39569 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39570 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39571 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39572 + put_cpu();
39573 + }
39574 +#endif
39575
39576 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39577 may depend on the personality. */
39578 SET_PERSONALITY(loc->elf_ex);
39579 +
39580 +#ifdef CONFIG_PAX_ASLR
39581 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39582 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39583 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39584 + }
39585 +#endif
39586 +
39587 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39588 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39589 + executable_stack = EXSTACK_DISABLE_X;
39590 + current->personality &= ~READ_IMPLIES_EXEC;
39591 + } else
39592 +#endif
39593 +
39594 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39595 current->personality |= READ_IMPLIES_EXEC;
39596
39597 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
39598 #else
39599 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39600 #endif
39601 +
39602 +#ifdef CONFIG_PAX_RANDMMAP
39603 + /* PaX: randomize base address at the default exe base if requested */
39604 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39605 +#ifdef CONFIG_SPARC64
39606 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39607 +#else
39608 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39609 +#endif
39610 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39611 + elf_flags |= MAP_FIXED;
39612 + }
39613 +#endif
39614 +
39615 }
39616
39617 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39618 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
39619 * allowed task size. Note that p_filesz must always be
39620 * <= p_memsz so it is only necessary to check p_memsz.
39621 */
39622 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39623 - elf_ppnt->p_memsz > TASK_SIZE ||
39624 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39625 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39626 + elf_ppnt->p_memsz > pax_task_size ||
39627 + pax_task_size - elf_ppnt->p_memsz < k) {
39628 /* set_brk can never work. Avoid overflows. */
39629 send_sig(SIGKILL, current, 0);
39630 retval = -EINVAL;
39631 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
39632 start_data += load_bias;
39633 end_data += load_bias;
39634
39635 +#ifdef CONFIG_PAX_RANDMMAP
39636 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39637 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39638 +#endif
39639 +
39640 /* Calling set_brk effectively mmaps the pages that we need
39641 * for the bss and break sections. We must do this before
39642 * mapping in the interpreter, to make sure it doesn't wind
39643 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
39644 goto out_free_dentry;
39645 }
39646 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39647 - send_sig(SIGSEGV, current, 0);
39648 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39649 - goto out_free_dentry;
39650 + /*
39651 + * This bss-zeroing can fail if the ELF
39652 + * file specifies odd protections. So
39653 + * we don't check the return value
39654 + */
39655 }
39656
39657 if (elf_interpreter) {
39658 @@ -1090,7 +1398,7 @@ out:
39659 * Decide what to dump of a segment, part, all or none.
39660 */
39661 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39662 - unsigned long mm_flags)
39663 + unsigned long mm_flags, long signr)
39664 {
39665 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39666
39667 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
39668 if (vma->vm_file == NULL)
39669 return 0;
39670
39671 - if (FILTER(MAPPED_PRIVATE))
39672 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39673 goto whole;
39674
39675 /*
39676 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
39677 {
39678 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39679 int i = 0;
39680 - do
39681 + do {
39682 i += 2;
39683 - while (auxv[i - 2] != AT_NULL);
39684 + } while (auxv[i - 2] != AT_NULL);
39685 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39686 }
39687
39688 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
39689 }
39690
39691 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
39692 - unsigned long mm_flags)
39693 + struct coredump_params *cprm)
39694 {
39695 struct vm_area_struct *vma;
39696 size_t size = 0;
39697
39698 for (vma = first_vma(current, gate_vma); vma != NULL;
39699 vma = next_vma(vma, gate_vma))
39700 - size += vma_dump_size(vma, mm_flags);
39701 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39702 return size;
39703 }
39704
39705 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
39706
39707 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
39708
39709 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
39710 + offset += elf_core_vma_data_size(gate_vma, cprm);
39711 offset += elf_core_extra_data_size();
39712 e_shoff = offset;
39713
39714 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
39715 offset = dataoff;
39716
39717 size += sizeof(*elf);
39718 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
39719 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
39720 goto end_coredump;
39721
39722 size += sizeof(*phdr4note);
39723 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
39724 if (size > cprm->limit
39725 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
39726 goto end_coredump;
39727 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
39728 phdr.p_offset = offset;
39729 phdr.p_vaddr = vma->vm_start;
39730 phdr.p_paddr = 0;
39731 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
39732 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39733 phdr.p_memsz = vma->vm_end - vma->vm_start;
39734 offset += phdr.p_filesz;
39735 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39736 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
39737 phdr.p_align = ELF_EXEC_PAGESIZE;
39738
39739 size += sizeof(phdr);
39740 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
39741 if (size > cprm->limit
39742 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
39743 goto end_coredump;
39744 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
39745 unsigned long addr;
39746 unsigned long end;
39747
39748 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
39749 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39750
39751 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39752 struct page *page;
39753 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
39754 page = get_dump_page(addr);
39755 if (page) {
39756 void *kaddr = kmap(page);
39757 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39758 stop = ((size += PAGE_SIZE) > cprm->limit) ||
39759 !dump_write(cprm->file, kaddr,
39760 PAGE_SIZE);
39761 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
39762
39763 if (e_phnum == PN_XNUM) {
39764 size += sizeof(*shdr4extnum);
39765 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
39766 if (size > cprm->limit
39767 || !dump_write(cprm->file, shdr4extnum,
39768 sizeof(*shdr4extnum)))
39769 @@ -2067,6 +2380,97 @@ out:
39770
39771 #endif /* CONFIG_ELF_CORE */
39772
39773 +#ifdef CONFIG_PAX_MPROTECT
39774 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
39775 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39776 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39777 + *
39778 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39779 + * basis because we want to allow the common case and not the special ones.
39780 + */
39781 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39782 +{
39783 + struct elfhdr elf_h;
39784 + struct elf_phdr elf_p;
39785 + unsigned long i;
39786 + unsigned long oldflags;
39787 + bool is_textrel_rw, is_textrel_rx, is_relro;
39788 +
39789 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39790 + return;
39791 +
39792 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39793 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39794 +
39795 +#ifdef CONFIG_PAX_ELFRELOCS
39796 + /* possible TEXTREL */
39797 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39798 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39799 +#else
39800 + is_textrel_rw = false;
39801 + is_textrel_rx = false;
39802 +#endif
39803 +
39804 + /* possible RELRO */
39805 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39806 +
39807 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39808 + return;
39809 +
39810 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39811 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39812 +
39813 +#ifdef CONFIG_PAX_ETEXECRELOCS
39814 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39815 +#else
39816 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39817 +#endif
39818 +
39819 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39820 + !elf_check_arch(&elf_h) ||
39821 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39822 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39823 + return;
39824 +
39825 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39826 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39827 + return;
39828 + switch (elf_p.p_type) {
39829 + case PT_DYNAMIC:
39830 + if (!is_textrel_rw && !is_textrel_rx)
39831 + continue;
39832 + i = 0UL;
39833 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39834 + elf_dyn dyn;
39835 +
39836 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39837 + return;
39838 + if (dyn.d_tag == DT_NULL)
39839 + return;
39840 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39841 + gr_log_textrel(vma);
39842 + if (is_textrel_rw)
39843 + vma->vm_flags |= VM_MAYWRITE;
39844 + else
39845 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39846 + vma->vm_flags &= ~VM_MAYWRITE;
39847 + return;
39848 + }
39849 + i++;
39850 + }
39851 + return;
39852 +
39853 + case PT_GNU_RELRO:
39854 + if (!is_relro)
39855 + continue;
39856 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39857 + vma->vm_flags &= ~VM_MAYWRITE;
39858 + return;
39859 + }
39860 + }
39861 +}
39862 +#endif
39863 +
39864 static int __init init_elf_binfmt(void)
39865 {
39866 return register_binfmt(&elf_format);
39867 diff -urNp linux-3.0.7/fs/binfmt_flat.c linux-3.0.7/fs/binfmt_flat.c
39868 --- linux-3.0.7/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
39869 +++ linux-3.0.7/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
39870 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
39871 realdatastart = (unsigned long) -ENOMEM;
39872 printk("Unable to allocate RAM for process data, errno %d\n",
39873 (int)-realdatastart);
39874 + down_write(&current->mm->mmap_sem);
39875 do_munmap(current->mm, textpos, text_len);
39876 + up_write(&current->mm->mmap_sem);
39877 ret = realdatastart;
39878 goto err;
39879 }
39880 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
39881 }
39882 if (IS_ERR_VALUE(result)) {
39883 printk("Unable to read data+bss, errno %d\n", (int)-result);
39884 + down_write(&current->mm->mmap_sem);
39885 do_munmap(current->mm, textpos, text_len);
39886 do_munmap(current->mm, realdatastart, len);
39887 + up_write(&current->mm->mmap_sem);
39888 ret = result;
39889 goto err;
39890 }
39891 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
39892 }
39893 if (IS_ERR_VALUE(result)) {
39894 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39895 + down_write(&current->mm->mmap_sem);
39896 do_munmap(current->mm, textpos, text_len + data_len + extra +
39897 MAX_SHARED_LIBS * sizeof(unsigned long));
39898 + up_write(&current->mm->mmap_sem);
39899 ret = result;
39900 goto err;
39901 }
39902 diff -urNp linux-3.0.7/fs/bio.c linux-3.0.7/fs/bio.c
39903 --- linux-3.0.7/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
39904 +++ linux-3.0.7/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
39905 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
39906 const int read = bio_data_dir(bio) == READ;
39907 struct bio_map_data *bmd = bio->bi_private;
39908 int i;
39909 - char *p = bmd->sgvecs[0].iov_base;
39910 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
39911
39912 __bio_for_each_segment(bvec, bio, i, 0) {
39913 char *addr = page_address(bvec->bv_page);
39914 diff -urNp linux-3.0.7/fs/block_dev.c linux-3.0.7/fs/block_dev.c
39915 --- linux-3.0.7/fs/block_dev.c 2011-10-16 21:54:54.000000000 -0400
39916 +++ linux-3.0.7/fs/block_dev.c 2011-10-16 21:55:28.000000000 -0400
39917 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
39918 else if (bdev->bd_contains == bdev)
39919 return true; /* is a whole device which isn't held */
39920
39921 - else if (whole->bd_holder == bd_may_claim)
39922 + else if (whole->bd_holder == (void *)bd_may_claim)
39923 return true; /* is a partition of a device that is being partitioned */
39924 else if (whole->bd_holder != NULL)
39925 return false; /* is a partition of a held device */
39926 diff -urNp linux-3.0.7/fs/btrfs/ctree.c linux-3.0.7/fs/btrfs/ctree.c
39927 --- linux-3.0.7/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
39928 +++ linux-3.0.7/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
39929 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
39930 free_extent_buffer(buf);
39931 add_root_to_dirty_list(root);
39932 } else {
39933 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39934 - parent_start = parent->start;
39935 - else
39936 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39937 + if (parent)
39938 + parent_start = parent->start;
39939 + else
39940 + parent_start = 0;
39941 + } else
39942 parent_start = 0;
39943
39944 WARN_ON(trans->transid != btrfs_header_generation(parent));
39945 diff -urNp linux-3.0.7/fs/btrfs/inode.c linux-3.0.7/fs/btrfs/inode.c
39946 --- linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:54:54.000000000 -0400
39947 +++ linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:55:28.000000000 -0400
39948 @@ -6896,7 +6896,7 @@ fail:
39949 return -ENOMEM;
39950 }
39951
39952 -static int btrfs_getattr(struct vfsmount *mnt,
39953 +int btrfs_getattr(struct vfsmount *mnt,
39954 struct dentry *dentry, struct kstat *stat)
39955 {
39956 struct inode *inode = dentry->d_inode;
39957 @@ -6908,6 +6908,14 @@ static int btrfs_getattr(struct vfsmount
39958 return 0;
39959 }
39960
39961 +EXPORT_SYMBOL(btrfs_getattr);
39962 +
39963 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
39964 +{
39965 + return BTRFS_I(inode)->root->anon_super.s_dev;
39966 +}
39967 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39968 +
39969 /*
39970 * If a file is moved, it will inherit the cow and compression flags of the new
39971 * directory.
39972 diff -urNp linux-3.0.7/fs/btrfs/ioctl.c linux-3.0.7/fs/btrfs/ioctl.c
39973 --- linux-3.0.7/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
39974 +++ linux-3.0.7/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
39975 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
39976 for (i = 0; i < num_types; i++) {
39977 struct btrfs_space_info *tmp;
39978
39979 + /* Don't copy in more than we allocated */
39980 if (!slot_count)
39981 break;
39982
39983 + slot_count--;
39984 +
39985 info = NULL;
39986 rcu_read_lock();
39987 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
39988 @@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
39989 memcpy(dest, &space, sizeof(space));
39990 dest++;
39991 space_args.total_spaces++;
39992 - slot_count--;
39993 }
39994 - if (!slot_count)
39995 - break;
39996 }
39997 up_read(&info->groups_sem);
39998 }
39999
40000 - user_dest = (struct btrfs_ioctl_space_info *)
40001 + user_dest = (struct btrfs_ioctl_space_info __user *)
40002 (arg + sizeof(struct btrfs_ioctl_space_args));
40003
40004 if (copy_to_user(user_dest, dest_orig, alloc_size))
40005 diff -urNp linux-3.0.7/fs/btrfs/relocation.c linux-3.0.7/fs/btrfs/relocation.c
40006 --- linux-3.0.7/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
40007 +++ linux-3.0.7/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
40008 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40009 }
40010 spin_unlock(&rc->reloc_root_tree.lock);
40011
40012 - BUG_ON((struct btrfs_root *)node->data != root);
40013 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40014
40015 if (!del) {
40016 spin_lock(&rc->reloc_root_tree.lock);
40017 diff -urNp linux-3.0.7/fs/cachefiles/bind.c linux-3.0.7/fs/cachefiles/bind.c
40018 --- linux-3.0.7/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
40019 +++ linux-3.0.7/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
40020 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40021 args);
40022
40023 /* start by checking things over */
40024 - ASSERT(cache->fstop_percent >= 0 &&
40025 - cache->fstop_percent < cache->fcull_percent &&
40026 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40027 cache->fcull_percent < cache->frun_percent &&
40028 cache->frun_percent < 100);
40029
40030 - ASSERT(cache->bstop_percent >= 0 &&
40031 - cache->bstop_percent < cache->bcull_percent &&
40032 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40033 cache->bcull_percent < cache->brun_percent &&
40034 cache->brun_percent < 100);
40035
40036 diff -urNp linux-3.0.7/fs/cachefiles/daemon.c linux-3.0.7/fs/cachefiles/daemon.c
40037 --- linux-3.0.7/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
40038 +++ linux-3.0.7/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
40039 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40040 if (n > buflen)
40041 return -EMSGSIZE;
40042
40043 - if (copy_to_user(_buffer, buffer, n) != 0)
40044 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40045 return -EFAULT;
40046
40047 return n;
40048 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40049 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40050 return -EIO;
40051
40052 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40053 + if (datalen > PAGE_SIZE - 1)
40054 return -EOPNOTSUPP;
40055
40056 /* drag the command string into the kernel so we can parse it */
40057 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40058 if (args[0] != '%' || args[1] != '\0')
40059 return -EINVAL;
40060
40061 - if (fstop < 0 || fstop >= cache->fcull_percent)
40062 + if (fstop >= cache->fcull_percent)
40063 return cachefiles_daemon_range_error(cache, args);
40064
40065 cache->fstop_percent = fstop;
40066 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40067 if (args[0] != '%' || args[1] != '\0')
40068 return -EINVAL;
40069
40070 - if (bstop < 0 || bstop >= cache->bcull_percent)
40071 + if (bstop >= cache->bcull_percent)
40072 return cachefiles_daemon_range_error(cache, args);
40073
40074 cache->bstop_percent = bstop;
40075 diff -urNp linux-3.0.7/fs/cachefiles/internal.h linux-3.0.7/fs/cachefiles/internal.h
40076 --- linux-3.0.7/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
40077 +++ linux-3.0.7/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
40078 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40079 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40080 struct rb_root active_nodes; /* active nodes (can't be culled) */
40081 rwlock_t active_lock; /* lock for active_nodes */
40082 - atomic_t gravecounter; /* graveyard uniquifier */
40083 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40084 unsigned frun_percent; /* when to stop culling (% files) */
40085 unsigned fcull_percent; /* when to start culling (% files) */
40086 unsigned fstop_percent; /* when to stop allocating (% files) */
40087 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40088 * proc.c
40089 */
40090 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40091 -extern atomic_t cachefiles_lookup_histogram[HZ];
40092 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40093 -extern atomic_t cachefiles_create_histogram[HZ];
40094 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40095 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40096 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40097
40098 extern int __init cachefiles_proc_init(void);
40099 extern void cachefiles_proc_cleanup(void);
40100 static inline
40101 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40102 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40103 {
40104 unsigned long jif = jiffies - start_jif;
40105 if (jif >= HZ)
40106 jif = HZ - 1;
40107 - atomic_inc(&histogram[jif]);
40108 + atomic_inc_unchecked(&histogram[jif]);
40109 }
40110
40111 #else
40112 diff -urNp linux-3.0.7/fs/cachefiles/namei.c linux-3.0.7/fs/cachefiles/namei.c
40113 --- linux-3.0.7/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
40114 +++ linux-3.0.7/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
40115 @@ -318,7 +318,7 @@ try_again:
40116 /* first step is to make up a grave dentry in the graveyard */
40117 sprintf(nbuffer, "%08x%08x",
40118 (uint32_t) get_seconds(),
40119 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40120 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40121
40122 /* do the multiway lock magic */
40123 trap = lock_rename(cache->graveyard, dir);
40124 diff -urNp linux-3.0.7/fs/cachefiles/proc.c linux-3.0.7/fs/cachefiles/proc.c
40125 --- linux-3.0.7/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
40126 +++ linux-3.0.7/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
40127 @@ -14,9 +14,9 @@
40128 #include <linux/seq_file.h>
40129 #include "internal.h"
40130
40131 -atomic_t cachefiles_lookup_histogram[HZ];
40132 -atomic_t cachefiles_mkdir_histogram[HZ];
40133 -atomic_t cachefiles_create_histogram[HZ];
40134 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40135 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40136 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40137
40138 /*
40139 * display the latency histogram
40140 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40141 return 0;
40142 default:
40143 index = (unsigned long) v - 3;
40144 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40145 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40146 - z = atomic_read(&cachefiles_create_histogram[index]);
40147 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40148 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40149 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40150 if (x == 0 && y == 0 && z == 0)
40151 return 0;
40152
40153 diff -urNp linux-3.0.7/fs/cachefiles/rdwr.c linux-3.0.7/fs/cachefiles/rdwr.c
40154 --- linux-3.0.7/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
40155 +++ linux-3.0.7/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
40156 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40157 old_fs = get_fs();
40158 set_fs(KERNEL_DS);
40159 ret = file->f_op->write(
40160 - file, (const void __user *) data, len, &pos);
40161 + file, (const void __force_user *) data, len, &pos);
40162 set_fs(old_fs);
40163 kunmap(page);
40164 if (ret != len)
40165 diff -urNp linux-3.0.7/fs/ceph/dir.c linux-3.0.7/fs/ceph/dir.c
40166 --- linux-3.0.7/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
40167 +++ linux-3.0.7/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
40168 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
40169 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40170 struct ceph_mds_client *mdsc = fsc->mdsc;
40171 unsigned frag = fpos_frag(filp->f_pos);
40172 - int off = fpos_off(filp->f_pos);
40173 + unsigned int off = fpos_off(filp->f_pos);
40174 int err;
40175 u32 ftype;
40176 struct ceph_mds_reply_info_parsed *rinfo;
40177 diff -urNp linux-3.0.7/fs/cifs/cifs_debug.c linux-3.0.7/fs/cifs/cifs_debug.c
40178 --- linux-3.0.7/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
40179 +++ linux-3.0.7/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
40180 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40181
40182 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40183 #ifdef CONFIG_CIFS_STATS2
40184 - atomic_set(&totBufAllocCount, 0);
40185 - atomic_set(&totSmBufAllocCount, 0);
40186 + atomic_set_unchecked(&totBufAllocCount, 0);
40187 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40188 #endif /* CONFIG_CIFS_STATS2 */
40189 spin_lock(&cifs_tcp_ses_lock);
40190 list_for_each(tmp1, &cifs_tcp_ses_list) {
40191 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40192 tcon = list_entry(tmp3,
40193 struct cifs_tcon,
40194 tcon_list);
40195 - atomic_set(&tcon->num_smbs_sent, 0);
40196 - atomic_set(&tcon->num_writes, 0);
40197 - atomic_set(&tcon->num_reads, 0);
40198 - atomic_set(&tcon->num_oplock_brks, 0);
40199 - atomic_set(&tcon->num_opens, 0);
40200 - atomic_set(&tcon->num_posixopens, 0);
40201 - atomic_set(&tcon->num_posixmkdirs, 0);
40202 - atomic_set(&tcon->num_closes, 0);
40203 - atomic_set(&tcon->num_deletes, 0);
40204 - atomic_set(&tcon->num_mkdirs, 0);
40205 - atomic_set(&tcon->num_rmdirs, 0);
40206 - atomic_set(&tcon->num_renames, 0);
40207 - atomic_set(&tcon->num_t2renames, 0);
40208 - atomic_set(&tcon->num_ffirst, 0);
40209 - atomic_set(&tcon->num_fnext, 0);
40210 - atomic_set(&tcon->num_fclose, 0);
40211 - atomic_set(&tcon->num_hardlinks, 0);
40212 - atomic_set(&tcon->num_symlinks, 0);
40213 - atomic_set(&tcon->num_locks, 0);
40214 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40215 + atomic_set_unchecked(&tcon->num_writes, 0);
40216 + atomic_set_unchecked(&tcon->num_reads, 0);
40217 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40218 + atomic_set_unchecked(&tcon->num_opens, 0);
40219 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40220 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40221 + atomic_set_unchecked(&tcon->num_closes, 0);
40222 + atomic_set_unchecked(&tcon->num_deletes, 0);
40223 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40224 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40225 + atomic_set_unchecked(&tcon->num_renames, 0);
40226 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40227 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40228 + atomic_set_unchecked(&tcon->num_fnext, 0);
40229 + atomic_set_unchecked(&tcon->num_fclose, 0);
40230 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40231 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40232 + atomic_set_unchecked(&tcon->num_locks, 0);
40233 }
40234 }
40235 }
40236 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40237 smBufAllocCount.counter, cifs_min_small);
40238 #ifdef CONFIG_CIFS_STATS2
40239 seq_printf(m, "Total Large %d Small %d Allocations\n",
40240 - atomic_read(&totBufAllocCount),
40241 - atomic_read(&totSmBufAllocCount));
40242 + atomic_read_unchecked(&totBufAllocCount),
40243 + atomic_read_unchecked(&totSmBufAllocCount));
40244 #endif /* CONFIG_CIFS_STATS2 */
40245
40246 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40247 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40248 if (tcon->need_reconnect)
40249 seq_puts(m, "\tDISCONNECTED ");
40250 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40251 - atomic_read(&tcon->num_smbs_sent),
40252 - atomic_read(&tcon->num_oplock_brks));
40253 + atomic_read_unchecked(&tcon->num_smbs_sent),
40254 + atomic_read_unchecked(&tcon->num_oplock_brks));
40255 seq_printf(m, "\nReads: %d Bytes: %lld",
40256 - atomic_read(&tcon->num_reads),
40257 + atomic_read_unchecked(&tcon->num_reads),
40258 (long long)(tcon->bytes_read));
40259 seq_printf(m, "\nWrites: %d Bytes: %lld",
40260 - atomic_read(&tcon->num_writes),
40261 + atomic_read_unchecked(&tcon->num_writes),
40262 (long long)(tcon->bytes_written));
40263 seq_printf(m, "\nFlushes: %d",
40264 - atomic_read(&tcon->num_flushes));
40265 + atomic_read_unchecked(&tcon->num_flushes));
40266 seq_printf(m, "\nLocks: %d HardLinks: %d "
40267 "Symlinks: %d",
40268 - atomic_read(&tcon->num_locks),
40269 - atomic_read(&tcon->num_hardlinks),
40270 - atomic_read(&tcon->num_symlinks));
40271 + atomic_read_unchecked(&tcon->num_locks),
40272 + atomic_read_unchecked(&tcon->num_hardlinks),
40273 + atomic_read_unchecked(&tcon->num_symlinks));
40274 seq_printf(m, "\nOpens: %d Closes: %d "
40275 "Deletes: %d",
40276 - atomic_read(&tcon->num_opens),
40277 - atomic_read(&tcon->num_closes),
40278 - atomic_read(&tcon->num_deletes));
40279 + atomic_read_unchecked(&tcon->num_opens),
40280 + atomic_read_unchecked(&tcon->num_closes),
40281 + atomic_read_unchecked(&tcon->num_deletes));
40282 seq_printf(m, "\nPosix Opens: %d "
40283 "Posix Mkdirs: %d",
40284 - atomic_read(&tcon->num_posixopens),
40285 - atomic_read(&tcon->num_posixmkdirs));
40286 + atomic_read_unchecked(&tcon->num_posixopens),
40287 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40288 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40289 - atomic_read(&tcon->num_mkdirs),
40290 - atomic_read(&tcon->num_rmdirs));
40291 + atomic_read_unchecked(&tcon->num_mkdirs),
40292 + atomic_read_unchecked(&tcon->num_rmdirs));
40293 seq_printf(m, "\nRenames: %d T2 Renames %d",
40294 - atomic_read(&tcon->num_renames),
40295 - atomic_read(&tcon->num_t2renames));
40296 + atomic_read_unchecked(&tcon->num_renames),
40297 + atomic_read_unchecked(&tcon->num_t2renames));
40298 seq_printf(m, "\nFindFirst: %d FNext %d "
40299 "FClose %d",
40300 - atomic_read(&tcon->num_ffirst),
40301 - atomic_read(&tcon->num_fnext),
40302 - atomic_read(&tcon->num_fclose));
40303 + atomic_read_unchecked(&tcon->num_ffirst),
40304 + atomic_read_unchecked(&tcon->num_fnext),
40305 + atomic_read_unchecked(&tcon->num_fclose));
40306 }
40307 }
40308 }
40309 diff -urNp linux-3.0.7/fs/cifs/cifsfs.c linux-3.0.7/fs/cifs/cifsfs.c
40310 --- linux-3.0.7/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
40311 +++ linux-3.0.7/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
40312 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
40313 cifs_req_cachep = kmem_cache_create("cifs_request",
40314 CIFSMaxBufSize +
40315 MAX_CIFS_HDR_SIZE, 0,
40316 - SLAB_HWCACHE_ALIGN, NULL);
40317 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40318 if (cifs_req_cachep == NULL)
40319 return -ENOMEM;
40320
40321 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
40322 efficient to alloc 1 per page off the slab compared to 17K (5page)
40323 alloc of large cifs buffers even when page debugging is on */
40324 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40325 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40326 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40327 NULL);
40328 if (cifs_sm_req_cachep == NULL) {
40329 mempool_destroy(cifs_req_poolp);
40330 @@ -1106,8 +1106,8 @@ init_cifs(void)
40331 atomic_set(&bufAllocCount, 0);
40332 atomic_set(&smBufAllocCount, 0);
40333 #ifdef CONFIG_CIFS_STATS2
40334 - atomic_set(&totBufAllocCount, 0);
40335 - atomic_set(&totSmBufAllocCount, 0);
40336 + atomic_set_unchecked(&totBufAllocCount, 0);
40337 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40338 #endif /* CONFIG_CIFS_STATS2 */
40339
40340 atomic_set(&midCount, 0);
40341 diff -urNp linux-3.0.7/fs/cifs/cifsglob.h linux-3.0.7/fs/cifs/cifsglob.h
40342 --- linux-3.0.7/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
40343 +++ linux-3.0.7/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
40344 @@ -381,28 +381,28 @@ struct cifs_tcon {
40345 __u16 Flags; /* optional support bits */
40346 enum statusEnum tidStatus;
40347 #ifdef CONFIG_CIFS_STATS
40348 - atomic_t num_smbs_sent;
40349 - atomic_t num_writes;
40350 - atomic_t num_reads;
40351 - atomic_t num_flushes;
40352 - atomic_t num_oplock_brks;
40353 - atomic_t num_opens;
40354 - atomic_t num_closes;
40355 - atomic_t num_deletes;
40356 - atomic_t num_mkdirs;
40357 - atomic_t num_posixopens;
40358 - atomic_t num_posixmkdirs;
40359 - atomic_t num_rmdirs;
40360 - atomic_t num_renames;
40361 - atomic_t num_t2renames;
40362 - atomic_t num_ffirst;
40363 - atomic_t num_fnext;
40364 - atomic_t num_fclose;
40365 - atomic_t num_hardlinks;
40366 - atomic_t num_symlinks;
40367 - atomic_t num_locks;
40368 - atomic_t num_acl_get;
40369 - atomic_t num_acl_set;
40370 + atomic_unchecked_t num_smbs_sent;
40371 + atomic_unchecked_t num_writes;
40372 + atomic_unchecked_t num_reads;
40373 + atomic_unchecked_t num_flushes;
40374 + atomic_unchecked_t num_oplock_brks;
40375 + atomic_unchecked_t num_opens;
40376 + atomic_unchecked_t num_closes;
40377 + atomic_unchecked_t num_deletes;
40378 + atomic_unchecked_t num_mkdirs;
40379 + atomic_unchecked_t num_posixopens;
40380 + atomic_unchecked_t num_posixmkdirs;
40381 + atomic_unchecked_t num_rmdirs;
40382 + atomic_unchecked_t num_renames;
40383 + atomic_unchecked_t num_t2renames;
40384 + atomic_unchecked_t num_ffirst;
40385 + atomic_unchecked_t num_fnext;
40386 + atomic_unchecked_t num_fclose;
40387 + atomic_unchecked_t num_hardlinks;
40388 + atomic_unchecked_t num_symlinks;
40389 + atomic_unchecked_t num_locks;
40390 + atomic_unchecked_t num_acl_get;
40391 + atomic_unchecked_t num_acl_set;
40392 #ifdef CONFIG_CIFS_STATS2
40393 unsigned long long time_writes;
40394 unsigned long long time_reads;
40395 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40396 }
40397
40398 #ifdef CONFIG_CIFS_STATS
40399 -#define cifs_stats_inc atomic_inc
40400 +#define cifs_stats_inc atomic_inc_unchecked
40401
40402 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40403 unsigned int bytes)
40404 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40405 /* Various Debug counters */
40406 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40407 #ifdef CONFIG_CIFS_STATS2
40408 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40409 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40410 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40411 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40412 #endif
40413 GLOBAL_EXTERN atomic_t smBufAllocCount;
40414 GLOBAL_EXTERN atomic_t midCount;
40415 diff -urNp linux-3.0.7/fs/cifs/link.c linux-3.0.7/fs/cifs/link.c
40416 --- linux-3.0.7/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
40417 +++ linux-3.0.7/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
40418 @@ -587,7 +587,7 @@ symlink_exit:
40419
40420 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40421 {
40422 - char *p = nd_get_link(nd);
40423 + const char *p = nd_get_link(nd);
40424 if (!IS_ERR(p))
40425 kfree(p);
40426 }
40427 diff -urNp linux-3.0.7/fs/cifs/misc.c linux-3.0.7/fs/cifs/misc.c
40428 --- linux-3.0.7/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
40429 +++ linux-3.0.7/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
40430 @@ -156,7 +156,7 @@ cifs_buf_get(void)
40431 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40432 atomic_inc(&bufAllocCount);
40433 #ifdef CONFIG_CIFS_STATS2
40434 - atomic_inc(&totBufAllocCount);
40435 + atomic_inc_unchecked(&totBufAllocCount);
40436 #endif /* CONFIG_CIFS_STATS2 */
40437 }
40438
40439 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40440 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40441 atomic_inc(&smBufAllocCount);
40442 #ifdef CONFIG_CIFS_STATS2
40443 - atomic_inc(&totSmBufAllocCount);
40444 + atomic_inc_unchecked(&totSmBufAllocCount);
40445 #endif /* CONFIG_CIFS_STATS2 */
40446
40447 }
40448 diff -urNp linux-3.0.7/fs/coda/cache.c linux-3.0.7/fs/coda/cache.c
40449 --- linux-3.0.7/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
40450 +++ linux-3.0.7/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
40451 @@ -24,7 +24,7 @@
40452 #include "coda_linux.h"
40453 #include "coda_cache.h"
40454
40455 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40456 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40457
40458 /* replace or extend an acl cache hit */
40459 void coda_cache_enter(struct inode *inode, int mask)
40460 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
40461 struct coda_inode_info *cii = ITOC(inode);
40462
40463 spin_lock(&cii->c_lock);
40464 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40465 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40466 if (cii->c_uid != current_fsuid()) {
40467 cii->c_uid = current_fsuid();
40468 cii->c_cached_perm = mask;
40469 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
40470 {
40471 struct coda_inode_info *cii = ITOC(inode);
40472 spin_lock(&cii->c_lock);
40473 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40474 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40475 spin_unlock(&cii->c_lock);
40476 }
40477
40478 /* remove all acl caches */
40479 void coda_cache_clear_all(struct super_block *sb)
40480 {
40481 - atomic_inc(&permission_epoch);
40482 + atomic_inc_unchecked(&permission_epoch);
40483 }
40484
40485
40486 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
40487 spin_lock(&cii->c_lock);
40488 hit = (mask & cii->c_cached_perm) == mask &&
40489 cii->c_uid == current_fsuid() &&
40490 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40491 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40492 spin_unlock(&cii->c_lock);
40493
40494 return hit;
40495 diff -urNp linux-3.0.7/fs/compat_binfmt_elf.c linux-3.0.7/fs/compat_binfmt_elf.c
40496 --- linux-3.0.7/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
40497 +++ linux-3.0.7/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
40498 @@ -30,11 +30,13 @@
40499 #undef elf_phdr
40500 #undef elf_shdr
40501 #undef elf_note
40502 +#undef elf_dyn
40503 #undef elf_addr_t
40504 #define elfhdr elf32_hdr
40505 #define elf_phdr elf32_phdr
40506 #define elf_shdr elf32_shdr
40507 #define elf_note elf32_note
40508 +#define elf_dyn Elf32_Dyn
40509 #define elf_addr_t Elf32_Addr
40510
40511 /*
40512 diff -urNp linux-3.0.7/fs/compat.c linux-3.0.7/fs/compat.c
40513 --- linux-3.0.7/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
40514 +++ linux-3.0.7/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
40515 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
40516 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
40517 {
40518 compat_ino_t ino = stat->ino;
40519 - typeof(ubuf->st_uid) uid = 0;
40520 - typeof(ubuf->st_gid) gid = 0;
40521 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
40522 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
40523 int err;
40524
40525 SET_UID(uid, stat->uid);
40526 @@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
40527
40528 set_fs(KERNEL_DS);
40529 /* The __user pointer cast is valid because of the set_fs() */
40530 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
40531 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
40532 set_fs(oldfs);
40533 /* truncating is ok because it's a user address */
40534 if (!ret)
40535 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
40536 goto out;
40537
40538 ret = -EINVAL;
40539 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
40540 + if (nr_segs > UIO_MAXIOV)
40541 goto out;
40542 if (nr_segs > fast_segs) {
40543 ret = -ENOMEM;
40544 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
40545
40546 struct compat_readdir_callback {
40547 struct compat_old_linux_dirent __user *dirent;
40548 + struct file * file;
40549 int result;
40550 };
40551
40552 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
40553 buf->result = -EOVERFLOW;
40554 return -EOVERFLOW;
40555 }
40556 +
40557 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40558 + return 0;
40559 +
40560 buf->result++;
40561 dirent = buf->dirent;
40562 if (!access_ok(VERIFY_WRITE, dirent,
40563 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
40564
40565 buf.result = 0;
40566 buf.dirent = dirent;
40567 + buf.file = file;
40568
40569 error = vfs_readdir(file, compat_fillonedir, &buf);
40570 if (buf.result)
40571 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
40572 struct compat_getdents_callback {
40573 struct compat_linux_dirent __user *current_dir;
40574 struct compat_linux_dirent __user *previous;
40575 + struct file * file;
40576 int count;
40577 int error;
40578 };
40579 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
40580 buf->error = -EOVERFLOW;
40581 return -EOVERFLOW;
40582 }
40583 +
40584 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40585 + return 0;
40586 +
40587 dirent = buf->previous;
40588 if (dirent) {
40589 if (__put_user(offset, &dirent->d_off))
40590 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
40591 buf.previous = NULL;
40592 buf.count = count;
40593 buf.error = 0;
40594 + buf.file = file;
40595
40596 error = vfs_readdir(file, compat_filldir, &buf);
40597 if (error >= 0)
40598 @@ -1006,6 +1018,7 @@ out:
40599 struct compat_getdents_callback64 {
40600 struct linux_dirent64 __user *current_dir;
40601 struct linux_dirent64 __user *previous;
40602 + struct file * file;
40603 int count;
40604 int error;
40605 };
40606 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
40607 buf->error = -EINVAL; /* only used if we fail.. */
40608 if (reclen > buf->count)
40609 return -EINVAL;
40610 +
40611 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40612 + return 0;
40613 +
40614 dirent = buf->previous;
40615
40616 if (dirent) {
40617 @@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
40618 buf.previous = NULL;
40619 buf.count = count;
40620 buf.error = 0;
40621 + buf.file = file;
40622
40623 error = vfs_readdir(file, compat_filldir64, &buf);
40624 if (error >= 0)
40625 error = buf.error;
40626 lastdirent = buf.previous;
40627 if (lastdirent) {
40628 - typeof(lastdirent->d_off) d_off = file->f_pos;
40629 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
40630 if (__put_user_unaligned(d_off, &lastdirent->d_off))
40631 error = -EFAULT;
40632 else
40633 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
40634 struct fdtable *fdt;
40635 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40636
40637 + pax_track_stack();
40638 +
40639 if (n < 0)
40640 goto out_nofds;
40641
40642 @@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
40643 oldfs = get_fs();
40644 set_fs(KERNEL_DS);
40645 /* The __user pointer casts are valid because of the set_fs() */
40646 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
40647 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
40648 set_fs(oldfs);
40649
40650 if (err)
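
Two of the fs/compat.c hunks above replace typeof(ubuf->st_uid) with typeof(((struct compat_stat *)0)->st_uid), and likewise for d_off. The second spelling is the usual C idiom for naming a member's type without going through an existing object: the null-pointer dereference sits inside typeof, which never evaluates its operand. Presumably the motivation here is that ubuf is a __user-annotated pointer and the patch's stricter pointer checking objects even to an unevaluated dereference of it, while a plain struct pointer cast does not. A standalone illustration with a hypothetical struct:

#include <stdio.h>
#include <sys/types.h>

struct sample_stat {
	uid_t st_uid;
	gid_t st_gid;
	long  st_size;
};

int main(void)
{
	/* member types taken without any sample_stat object in scope */
	typeof(((struct sample_stat *)0)->st_uid)  uid  = 0;
	typeof(((struct sample_stat *)0)->st_size) size = 0;

	printf("uid_t is %zu bytes, st_size is %zu bytes\n",
	       sizeof(uid), sizeof(size));
	return (int)(uid + size);
}
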
40651 diff -urNp linux-3.0.7/fs/compat_ioctl.c linux-3.0.7/fs/compat_ioctl.c
40652 --- linux-3.0.7/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
40653 +++ linux-3.0.7/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
40654 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
40655
40656 err = get_user(palp, &up->palette);
40657 err |= get_user(length, &up->length);
40658 + if (err)
40659 + return -EFAULT;
40660
40661 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40662 err = put_user(compat_ptr(palp), &up_native->palette);
40663 @@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
40664 return -EFAULT;
40665 if (__get_user(udata, &ss32->iomem_base))
40666 return -EFAULT;
40667 - ss.iomem_base = compat_ptr(udata);
40668 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
40669 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
40670 __get_user(ss.port_high, &ss32->port_high))
40671 return -EFAULT;
40672 @@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
40673 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
40674 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
40675 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
40676 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40677 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40678 return -EFAULT;
40679
40680 return ioctl_preallocate(file, p);
40681 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
40682 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
40683 {
40684 unsigned int a, b;
40685 - a = *(unsigned int *)p;
40686 - b = *(unsigned int *)q;
40687 + a = *(const unsigned int *)p;
40688 + b = *(const unsigned int *)q;
40689 if (a > b)
40690 return 1;
40691 if (a < b)
40692 diff -urNp linux-3.0.7/fs/configfs/dir.c linux-3.0.7/fs/configfs/dir.c
40693 --- linux-3.0.7/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40694 +++ linux-3.0.7/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
40695 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
40696 }
40697 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40698 struct configfs_dirent *next;
40699 - const char * name;
40700 + const unsigned char * name;
40701 + char d_name[sizeof(next->s_dentry->d_iname)];
40702 int len;
40703 struct inode *inode = NULL;
40704
40705 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
40706 continue;
40707
40708 name = configfs_get_name(next);
40709 - len = strlen(name);
40710 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40711 + len = next->s_dentry->d_name.len;
40712 + memcpy(d_name, name, len);
40713 + name = d_name;
40714 + } else
40715 + len = strlen(name);
40716
40717 /*
40718 * We'll have a dentry and an inode for
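
The configfs_readdir() change above copies the name into a local d_name[] buffer whenever configfs_get_name() points at the dentry's inline d_iname storage, and uses the recorded d_name.len instead of strlen(). The apparent reasoning is that the inline name can be rewritten concurrently (for example by a rename), so measuring it with strlen() could run past the small inline array; snapshotting it with the stored length keeps the later use self-consistent. Outside the kernel the general shape is just "copy a possibly-changing string under a known length bound":

#include <stdio.h>
#include <string.h>

#define INLINE_NAME_LEN 32

struct entry {
	unsigned int len;                  /* authoritative length */
	char inline_name[INLINE_NAME_LEN]; /* may be updated concurrently */
};

/* Take a private, NUL-terminated snapshot bounded by the recorded length. */
static void snapshot_name(const struct entry *e, char *dst, size_t dstlen)
{
	size_t n = e->len;

	if (n >= dstlen)
		n = dstlen - 1;
	memcpy(dst, e->inline_name, n);
	dst[n] = '\0';
}

int main(void)
{
	struct entry e = { .len = 5, .inline_name = "hello-world" };
	char buf[INLINE_NAME_LEN];

	snapshot_name(&e, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "hello" */
	return 0;
}
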
40719 diff -urNp linux-3.0.7/fs/dcache.c linux-3.0.7/fs/dcache.c
40720 --- linux-3.0.7/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
40721 +++ linux-3.0.7/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
40722 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
40723 mempages -= reserve;
40724
40725 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40726 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40727 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40728
40729 dcache_init();
40730 inode_init();
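
Several kmem_cache_create() calls in this patch (names_cache above, the two cifs request caches earlier) gain a SLAB_USERCOPY flag. The flag itself is introduced elsewhere in the patch; under PAX_USERCOPY, copy_to_user()/copy_from_user() on a slab object is refused unless the object's cache was explicitly whitelisted, and SLAB_USERCOPY is that whitelist bit. Caches whose objects legitimately cross the user/kernel boundary are marked; most others are not. A toy model of the policy check, with invented names and flag values:

#include <stdbool.h>
#include <stdio.h>

#define SLAB_USERCOPY 0x0001u	/* cache objects may be copied to/from userland */

struct kmem_cache {
	const char  *name;
	unsigned int flags;
};

/* Refuse user copies from caches that were not explicitly whitelisted. */
static bool usercopy_allowed(const struct kmem_cache *cache)
{
	return cache->flags & SLAB_USERCOPY;
}

int main(void)
{
	struct kmem_cache names = { "names_cache", SLAB_USERCOPY };
	struct kmem_cache cred  = { "cred_jar", 0 };

	printf("%s: %s\n", names.name, usercopy_allowed(&names) ? "copy ok" : "denied");
	printf("%s: %s\n", cred.name,  usercopy_allowed(&cred)  ? "copy ok" : "denied");
	return 0;
}
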
40731 diff -urNp linux-3.0.7/fs/ecryptfs/inode.c linux-3.0.7/fs/ecryptfs/inode.c
40732 --- linux-3.0.7/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
40733 +++ linux-3.0.7/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
40734 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
40735 old_fs = get_fs();
40736 set_fs(get_ds());
40737 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40738 - (char __user *)lower_buf,
40739 + (char __force_user *)lower_buf,
40740 lower_bufsiz);
40741 set_fs(old_fs);
40742 if (rc < 0)
40743 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
40744 }
40745 old_fs = get_fs();
40746 set_fs(get_ds());
40747 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40748 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
40749 set_fs(old_fs);
40750 if (rc < 0) {
40751 kfree(buf);
40752 @@ -765,7 +765,7 @@ out:
40753 static void
40754 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
40755 {
40756 - char *buf = nd_get_link(nd);
40757 + const char *buf = nd_get_link(nd);
40758 if (!IS_ERR(buf)) {
40759 /* Free the char* */
40760 kfree(buf);
40761 diff -urNp linux-3.0.7/fs/ecryptfs/miscdev.c linux-3.0.7/fs/ecryptfs/miscdev.c
40762 --- linux-3.0.7/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
40763 +++ linux-3.0.7/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
40764 @@ -328,7 +328,7 @@ check_list:
40765 goto out_unlock_msg_ctx;
40766 i = 5;
40767 if (msg_ctx->msg) {
40768 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
40769 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
40770 goto out_unlock_msg_ctx;
40771 i += packet_length_size;
40772 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
40773 diff -urNp linux-3.0.7/fs/ecryptfs/read_write.c linux-3.0.7/fs/ecryptfs/read_write.c
40774 --- linux-3.0.7/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
40775 +++ linux-3.0.7/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
40776 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
40777 return -EIO;
40778 fs_save = get_fs();
40779 set_fs(get_ds());
40780 - rc = vfs_write(lower_file, data, size, &offset);
40781 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
40782 set_fs(fs_save);
40783 mark_inode_dirty_sync(ecryptfs_inode);
40784 return rc;
40785 @@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
40786 return -EIO;
40787 fs_save = get_fs();
40788 set_fs(get_ds());
40789 - rc = vfs_read(lower_file, data, size, &offset);
40790 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
40791 set_fs(fs_save);
40792 return rc;
40793 }
40794 diff -urNp linux-3.0.7/fs/exec.c linux-3.0.7/fs/exec.c
40795 --- linux-3.0.7/fs/exec.c 2011-10-17 23:17:09.000000000 -0400
40796 +++ linux-3.0.7/fs/exec.c 2011-10-17 23:17:19.000000000 -0400
40797 @@ -55,12 +55,24 @@
40798 #include <linux/pipe_fs_i.h>
40799 #include <linux/oom.h>
40800 #include <linux/compat.h>
40801 +#include <linux/random.h>
40802 +#include <linux/seq_file.h>
40803 +
40804 +#ifdef CONFIG_PAX_REFCOUNT
40805 +#include <linux/kallsyms.h>
40806 +#include <linux/kdebug.h>
40807 +#endif
40808
40809 #include <asm/uaccess.h>
40810 #include <asm/mmu_context.h>
40811 #include <asm/tlb.h>
40812 #include "internal.h"
40813
40814 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40815 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40816 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40817 +#endif
40818 +
40819 int core_uses_pid;
40820 char core_pattern[CORENAME_MAX_SIZE] = "core";
40821 unsigned int core_pipe_limit;
40822 @@ -70,7 +82,7 @@ struct core_name {
40823 char *corename;
40824 int used, size;
40825 };
40826 -static atomic_t call_count = ATOMIC_INIT(1);
40827 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
40828
40829 /* The maximal length of core_pattern is also specified in sysctl.c */
40830
40831 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40832 char *tmp = getname(library);
40833 int error = PTR_ERR(tmp);
40834 static const struct open_flags uselib_flags = {
40835 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
40836 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
40837 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
40838 .intent = LOOKUP_OPEN
40839 };
40840 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
40841 int write)
40842 {
40843 struct page *page;
40844 - int ret;
40845
40846 -#ifdef CONFIG_STACK_GROWSUP
40847 - if (write) {
40848 - ret = expand_downwards(bprm->vma, pos);
40849 - if (ret < 0)
40850 - return NULL;
40851 - }
40852 -#endif
40853 - ret = get_user_pages(current, bprm->mm, pos,
40854 - 1, write, 1, &page, NULL);
40855 - if (ret <= 0)
40856 + if (0 > expand_downwards(bprm->vma, pos))
40857 + return NULL;
40858 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40859 return NULL;
40860
40861 if (write) {
40862 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
40863 vma->vm_end = STACK_TOP_MAX;
40864 vma->vm_start = vma->vm_end - PAGE_SIZE;
40865 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
40866 +
40867 +#ifdef CONFIG_PAX_SEGMEXEC
40868 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40869 +#endif
40870 +
40871 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40872 INIT_LIST_HEAD(&vma->anon_vma_chain);
40873
40874 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
40875 mm->stack_vm = mm->total_vm = 1;
40876 up_write(&mm->mmap_sem);
40877 bprm->p = vma->vm_end - sizeof(void *);
40878 +
40879 +#ifdef CONFIG_PAX_RANDUSTACK
40880 + if (randomize_va_space)
40881 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40882 +#endif
40883 +
40884 return 0;
40885 err:
40886 up_write(&mm->mmap_sem);
40887 @@ -403,19 +418,7 @@ err:
40888 return err;
40889 }
40890
40891 -struct user_arg_ptr {
40892 -#ifdef CONFIG_COMPAT
40893 - bool is_compat;
40894 -#endif
40895 - union {
40896 - const char __user *const __user *native;
40897 -#ifdef CONFIG_COMPAT
40898 - compat_uptr_t __user *compat;
40899 -#endif
40900 - } ptr;
40901 -};
40902 -
40903 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40904 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40905 {
40906 const char __user *native;
40907
40908 @@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
40909 compat_uptr_t compat;
40910
40911 if (get_user(compat, argv.ptr.compat + nr))
40912 - return ERR_PTR(-EFAULT);
40913 + return (const char __force_user *)ERR_PTR(-EFAULT);
40914
40915 return compat_ptr(compat);
40916 }
40917 #endif
40918
40919 if (get_user(native, argv.ptr.native + nr))
40920 - return ERR_PTR(-EFAULT);
40921 + return (const char __force_user *)ERR_PTR(-EFAULT);
40922
40923 return native;
40924 }
40925 @@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
40926 if (!p)
40927 break;
40928
40929 - if (IS_ERR(p))
40930 + if (IS_ERR((const char __force_kernel *)p))
40931 return -EFAULT;
40932
40933 if (i++ >= max)
40934 @@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
40935
40936 ret = -EFAULT;
40937 str = get_user_arg_ptr(argv, argc);
40938 - if (IS_ERR(str))
40939 + if (IS_ERR((const char __force_kernel *)str))
40940 goto out;
40941
40942 len = strnlen_user(str, MAX_ARG_STRLEN);
40943 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
40944 int r;
40945 mm_segment_t oldfs = get_fs();
40946 struct user_arg_ptr argv = {
40947 - .ptr.native = (const char __user *const __user *)__argv,
40948 + .ptr.native = (const char __force_user *const __force_user *)__argv,
40949 };
40950
40951 set_fs(KERNEL_DS);
40952 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
40953 unsigned long new_end = old_end - shift;
40954 struct mmu_gather tlb;
40955
40956 - BUG_ON(new_start > new_end);
40957 + if (new_start >= new_end || new_start < mmap_min_addr)
40958 + return -ENOMEM;
40959
40960 /*
40961 * ensure there are no vmas between where we want to go
40962 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
40963 if (vma != find_vma(mm, new_start))
40964 return -EFAULT;
40965
40966 +#ifdef CONFIG_PAX_SEGMEXEC
40967 + BUG_ON(pax_find_mirror_vma(vma));
40968 +#endif
40969 +
40970 /*
40971 * cover the whole range: [new_start, old_end)
40972 */
40973 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
40974 stack_top = arch_align_stack(stack_top);
40975 stack_top = PAGE_ALIGN(stack_top);
40976
40977 - if (unlikely(stack_top < mmap_min_addr) ||
40978 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40979 - return -ENOMEM;
40980 -
40981 stack_shift = vma->vm_end - stack_top;
40982
40983 bprm->p -= stack_shift;
40984 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
40985 bprm->exec -= stack_shift;
40986
40987 down_write(&mm->mmap_sem);
40988 +
40989 + /* Move stack pages down in memory. */
40990 + if (stack_shift) {
40991 + ret = shift_arg_pages(vma, stack_shift);
40992 + if (ret)
40993 + goto out_unlock;
40994 + }
40995 +
40996 vm_flags = VM_STACK_FLAGS;
40997
40998 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40999 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41000 + vm_flags &= ~VM_EXEC;
41001 +
41002 +#ifdef CONFIG_PAX_MPROTECT
41003 + if (mm->pax_flags & MF_PAX_MPROTECT)
41004 + vm_flags &= ~VM_MAYEXEC;
41005 +#endif
41006 +
41007 + }
41008 +#endif
41009 +
41010 /*
41011 * Adjust stack execute permissions; explicitly enable for
41012 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41013 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
41014 goto out_unlock;
41015 BUG_ON(prev != vma);
41016
41017 - /* Move stack pages down in memory. */
41018 - if (stack_shift) {
41019 - ret = shift_arg_pages(vma, stack_shift);
41020 - if (ret)
41021 - goto out_unlock;
41022 - }
41023 -
41024 /* mprotect_fixup is overkill to remove the temporary stack flags */
41025 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41026
41027 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
41028 struct file *file;
41029 int err;
41030 static const struct open_flags open_exec_flags = {
41031 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41032 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41033 .acc_mode = MAY_EXEC | MAY_OPEN,
41034 .intent = LOOKUP_OPEN
41035 };
41036 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
41037 old_fs = get_fs();
41038 set_fs(get_ds());
41039 /* The cast to a user pointer is valid due to the set_fs() */
41040 - result = vfs_read(file, (void __user *)addr, count, &pos);
41041 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41042 set_fs(old_fs);
41043 return result;
41044 }
41045 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
41046 }
41047 rcu_read_unlock();
41048
41049 - if (p->fs->users > n_fs) {
41050 + if (atomic_read(&p->fs->users) > n_fs) {
41051 bprm->unsafe |= LSM_UNSAFE_SHARE;
41052 } else {
41053 res = -EAGAIN;
41054 @@ -1430,11 +1447,35 @@ static int do_execve_common(const char *
41055 struct user_arg_ptr envp,
41056 struct pt_regs *regs)
41057 {
41058 +#ifdef CONFIG_GRKERNSEC
41059 + struct file *old_exec_file;
41060 + struct acl_subject_label *old_acl;
41061 + struct rlimit old_rlim[RLIM_NLIMITS];
41062 +#endif
41063 struct linux_binprm *bprm;
41064 struct file *file;
41065 struct files_struct *displaced;
41066 bool clear_in_exec;
41067 int retval;
41068 + const struct cred *cred = current_cred();
41069 +
41070 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41071 +
41072 + /*
41073 + * We move the actual failure in case of RLIMIT_NPROC excess from
41074 + * set*uid() to execve() because too many poorly written programs
41075 + * don't check setuid() return code. Here we additionally recheck
41076 + * whether NPROC limit is still exceeded.
41077 + */
41078 + if ((current->flags & PF_NPROC_EXCEEDED) &&
41079 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
41080 + retval = -EAGAIN;
41081 + goto out_ret;
41082 + }
41083 +
41084 + /* We're below the limit (still or again), so we don't want to make
41085 + * further execve() calls fail. */
41086 + current->flags &= ~PF_NPROC_EXCEEDED;
41087
41088 retval = unshare_files(&displaced);
41089 if (retval)
41090 @@ -1466,6 +1507,16 @@ static int do_execve_common(const char *
41091 bprm->filename = filename;
41092 bprm->interp = filename;
41093
41094 + if (gr_process_user_ban()) {
41095 + retval = -EPERM;
41096 + goto out_file;
41097 + }
41098 +
41099 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41100 + retval = -EACCES;
41101 + goto out_file;
41102 + }
41103 +
41104 retval = bprm_mm_init(bprm);
41105 if (retval)
41106 goto out_file;
41107 @@ -1495,9 +1546,40 @@ static int do_execve_common(const char *
41108 if (retval < 0)
41109 goto out;
41110
41111 + if (!gr_tpe_allow(file)) {
41112 + retval = -EACCES;
41113 + goto out;
41114 + }
41115 +
41116 + if (gr_check_crash_exec(file)) {
41117 + retval = -EACCES;
41118 + goto out;
41119 + }
41120 +
41121 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41122 +
41123 + gr_handle_exec_args(bprm, argv);
41124 +
41125 +#ifdef CONFIG_GRKERNSEC
41126 + old_acl = current->acl;
41127 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41128 + old_exec_file = current->exec_file;
41129 + get_file(file);
41130 + current->exec_file = file;
41131 +#endif
41132 +
41133 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41134 + bprm->unsafe & LSM_UNSAFE_SHARE);
41135 + if (retval < 0)
41136 + goto out_fail;
41137 +
41138 retval = search_binary_handler(bprm,regs);
41139 if (retval < 0)
41140 - goto out;
41141 + goto out_fail;
41142 +#ifdef CONFIG_GRKERNSEC
41143 + if (old_exec_file)
41144 + fput(old_exec_file);
41145 +#endif
41146
41147 /* execve succeeded */
41148 current->fs->in_exec = 0;
41149 @@ -1508,6 +1590,14 @@ static int do_execve_common(const char *
41150 put_files_struct(displaced);
41151 return retval;
41152
41153 +out_fail:
41154 +#ifdef CONFIG_GRKERNSEC
41155 + current->acl = old_acl;
41156 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41157 + fput(current->exec_file);
41158 + current->exec_file = old_exec_file;
41159 +#endif
41160 +
41161 out:
41162 if (bprm->mm) {
41163 acct_arg_size(bprm, 0);
41164 @@ -1581,7 +1671,7 @@ static int expand_corename(struct core_n
41165 {
41166 char *old_corename = cn->corename;
41167
41168 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41169 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41170 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41171
41172 if (!cn->corename) {
41173 @@ -1669,7 +1759,7 @@ static int format_corename(struct core_n
41174 int pid_in_pattern = 0;
41175 int err = 0;
41176
41177 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41178 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41179 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41180 cn->used = 0;
41181
41182 @@ -1760,6 +1850,219 @@ out:
41183 return ispipe;
41184 }
41185
41186 +int pax_check_flags(unsigned long *flags)
41187 +{
41188 + int retval = 0;
41189 +
41190 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41191 + if (*flags & MF_PAX_SEGMEXEC)
41192 + {
41193 + *flags &= ~MF_PAX_SEGMEXEC;
41194 + retval = -EINVAL;
41195 + }
41196 +#endif
41197 +
41198 + if ((*flags & MF_PAX_PAGEEXEC)
41199 +
41200 +#ifdef CONFIG_PAX_PAGEEXEC
41201 + && (*flags & MF_PAX_SEGMEXEC)
41202 +#endif
41203 +
41204 + )
41205 + {
41206 + *flags &= ~MF_PAX_PAGEEXEC;
41207 + retval = -EINVAL;
41208 + }
41209 +
41210 + if ((*flags & MF_PAX_MPROTECT)
41211 +
41212 +#ifdef CONFIG_PAX_MPROTECT
41213 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41214 +#endif
41215 +
41216 + )
41217 + {
41218 + *flags &= ~MF_PAX_MPROTECT;
41219 + retval = -EINVAL;
41220 + }
41221 +
41222 + if ((*flags & MF_PAX_EMUTRAMP)
41223 +
41224 +#ifdef CONFIG_PAX_EMUTRAMP
41225 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41226 +#endif
41227 +
41228 + )
41229 + {
41230 + *flags &= ~MF_PAX_EMUTRAMP;
41231 + retval = -EINVAL;
41232 + }
41233 +
41234 + return retval;
41235 +}
41236 +
41237 +EXPORT_SYMBOL(pax_check_flags);
41238 +
41239 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41240 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41241 +{
41242 + struct task_struct *tsk = current;
41243 + struct mm_struct *mm = current->mm;
41244 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41245 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41246 + char *path_exec = NULL;
41247 + char *path_fault = NULL;
41248 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41249 +
41250 + if (buffer_exec && buffer_fault) {
41251 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41252 +
41253 + down_read(&mm->mmap_sem);
41254 + vma = mm->mmap;
41255 + while (vma && (!vma_exec || !vma_fault)) {
41256 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41257 + vma_exec = vma;
41258 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41259 + vma_fault = vma;
41260 + vma = vma->vm_next;
41261 + }
41262 + if (vma_exec) {
41263 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41264 + if (IS_ERR(path_exec))
41265 + path_exec = "<path too long>";
41266 + else {
41267 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41268 + if (path_exec) {
41269 + *path_exec = 0;
41270 + path_exec = buffer_exec;
41271 + } else
41272 + path_exec = "<path too long>";
41273 + }
41274 + }
41275 + if (vma_fault) {
41276 + start = vma_fault->vm_start;
41277 + end = vma_fault->vm_end;
41278 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41279 + if (vma_fault->vm_file) {
41280 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41281 + if (IS_ERR(path_fault))
41282 + path_fault = "<path too long>";
41283 + else {
41284 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41285 + if (path_fault) {
41286 + *path_fault = 0;
41287 + path_fault = buffer_fault;
41288 + } else
41289 + path_fault = "<path too long>";
41290 + }
41291 + } else
41292 + path_fault = "<anonymous mapping>";
41293 + }
41294 + up_read(&mm->mmap_sem);
41295 + }
41296 + if (tsk->signal->curr_ip)
41297 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41298 + else
41299 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41300 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41301 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41302 + task_uid(tsk), task_euid(tsk), pc, sp);
41303 + free_page((unsigned long)buffer_exec);
41304 + free_page((unsigned long)buffer_fault);
41305 + pax_report_insns(pc, sp);
41306 + do_coredump(SIGKILL, SIGKILL, regs);
41307 +}
41308 +#endif
41309 +
41310 +#ifdef CONFIG_PAX_REFCOUNT
41311 +void pax_report_refcount_overflow(struct pt_regs *regs)
41312 +{
41313 + if (current->signal->curr_ip)
41314 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41315 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41316 + else
41317 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41318 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41319 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41320 + show_regs(regs);
41321 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41322 +}
41323 +#endif
41324 +
41325 +#ifdef CONFIG_PAX_USERCOPY
41326 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41327 +int object_is_on_stack(const void *obj, unsigned long len)
41328 +{
41329 + const void * const stack = task_stack_page(current);
41330 + const void * const stackend = stack + THREAD_SIZE;
41331 +
41332 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41333 + const void *frame = NULL;
41334 + const void *oldframe;
41335 +#endif
41336 +
41337 + if (obj + len < obj)
41338 + return -1;
41339 +
41340 + if (obj + len <= stack || stackend <= obj)
41341 + return 0;
41342 +
41343 + if (obj < stack || stackend < obj + len)
41344 + return -1;
41345 +
41346 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41347 + oldframe = __builtin_frame_address(1);
41348 + if (oldframe)
41349 + frame = __builtin_frame_address(2);
41350 + /*
41351 + low ----------------------------------------------> high
41352 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41353 + ^----------------^
41354 + allow copies only within here
41355 + */
41356 + while (stack <= frame && frame < stackend) {
41357 + /* if obj + len extends past the last frame, this
41358 + check won't pass and the next frame will be 0,
41359 + causing us to bail out and correctly report
41360 + the copy as invalid
41361 + */
41362 + if (obj + len <= frame)
41363 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41364 + oldframe = frame;
41365 + frame = *(const void * const *)frame;
41366 + }
41367 + return -1;
41368 +#else
41369 + return 1;
41370 +#endif
41371 +}
41372 +
41373 +
41374 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41375 +{
41376 + if (current->signal->curr_ip)
41377 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41378 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41379 + else
41380 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41381 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41382 + dump_stack();
41383 + gr_handle_kernel_exploit();
41384 + do_group_exit(SIGKILL);
41385 +}
41386 +#endif
41387 +
41388 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41389 +void pax_track_stack(void)
41390 +{
41391 + unsigned long sp = (unsigned long)&sp;
41392 + if (sp < current_thread_info()->lowest_stack &&
41393 + sp > (unsigned long)task_stack_page(current))
41394 + current_thread_info()->lowest_stack = sp;
41395 +}
41396 +EXPORT_SYMBOL(pax_track_stack);
41397 +#endif
41398 +
41399 static int zap_process(struct task_struct *start, int exit_code)
41400 {
41401 struct task_struct *t;
41402 @@ -1971,17 +2274,17 @@ static void wait_for_dump_helpers(struct
41403 pipe = file->f_path.dentry->d_inode->i_pipe;
41404
41405 pipe_lock(pipe);
41406 - pipe->readers++;
41407 - pipe->writers--;
41408 + atomic_inc(&pipe->readers);
41409 + atomic_dec(&pipe->writers);
41410
41411 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41412 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41413 wake_up_interruptible_sync(&pipe->wait);
41414 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41415 pipe_wait(pipe);
41416 }
41417
41418 - pipe->readers--;
41419 - pipe->writers++;
41420 + atomic_dec(&pipe->readers);
41421 + atomic_inc(&pipe->writers);
41422 pipe_unlock(pipe);
41423
41424 }
41425 @@ -2042,7 +2345,7 @@ void do_coredump(long signr, int exit_co
41426 int retval = 0;
41427 int flag = 0;
41428 int ispipe;
41429 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41430 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41431 struct coredump_params cprm = {
41432 .signr = signr,
41433 .regs = regs,
41434 @@ -2057,6 +2360,9 @@ void do_coredump(long signr, int exit_co
41435
41436 audit_core_dumps(signr);
41437
41438 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41439 + gr_handle_brute_attach(current, cprm.mm_flags);
41440 +
41441 binfmt = mm->binfmt;
41442 if (!binfmt || !binfmt->core_dump)
41443 goto fail;
41444 @@ -2097,6 +2403,8 @@ void do_coredump(long signr, int exit_co
41445 goto fail_corename;
41446 }
41447
41448 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41449 +
41450 if (ispipe) {
41451 int dump_count;
41452 char **helper_argv;
41453 @@ -2124,7 +2432,7 @@ void do_coredump(long signr, int exit_co
41454 }
41455 cprm.limit = RLIM_INFINITY;
41456
41457 - dump_count = atomic_inc_return(&core_dump_count);
41458 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41459 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41460 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41461 task_tgid_vnr(current), current->comm);
41462 @@ -2194,7 +2502,7 @@ close_fail:
41463 filp_close(cprm.file, NULL);
41464 fail_dropcount:
41465 if (ispipe)
41466 - atomic_dec(&core_dump_count);
41467 + atomic_dec_unchecked(&core_dump_count);
41468 fail_unlock:
41469 kfree(cn.corename);
41470 fail_corename:
41471 @@ -2213,7 +2521,7 @@ fail:
41472 */
41473 int dump_write(struct file *file, const void *addr, int nr)
41474 {
41475 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41476 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41477 }
41478 EXPORT_SYMBOL(dump_write);
41479
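
Among the fs/exec.c changes, the PAX_RANDUSTACK hunk perturbs bprm->p with (random & ~15) & ~PAGE_MASK. Reading the masks: ~PAGE_MASK keeps only the offset-within-a-page bits and ~15 clears the low four bits, so the argument pointer is shifted by a random, 16-byte-aligned amount of less than one page (256 possible positions on a 4 KiB page, about 8 bits of entropy). A quick standalone check of that arithmetic, with rand() standing in for the kernel's randomness source and a 4 KiB page assumed:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0x7ffffffff000UL;	/* pretend top-of-stack pointer */
	unsigned long r;

	srand((unsigned)time(NULL));
	r = ((unsigned long)rand() & ~15UL) & ~PAGE_MASK;

	printf("offset applied: %lu bytes (16-byte aligned, < %lu)\n", r, PAGE_SIZE);
	printf("p before %#lx, after %#lx\n", p, p ^ r);
	return 0;
}
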
41480 diff -urNp linux-3.0.7/fs/ext2/balloc.c linux-3.0.7/fs/ext2/balloc.c
41481 --- linux-3.0.7/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
41482 +++ linux-3.0.7/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
41483 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41484
41485 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41486 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41487 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41488 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41489 sbi->s_resuid != current_fsuid() &&
41490 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41491 return 0;
41492 diff -urNp linux-3.0.7/fs/ext3/balloc.c linux-3.0.7/fs/ext3/balloc.c
41493 --- linux-3.0.7/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
41494 +++ linux-3.0.7/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
41495 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
41496
41497 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41498 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41499 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41500 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41501 sbi->s_resuid != current_fsuid() &&
41502 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41503 return 0;
41504 diff -urNp linux-3.0.7/fs/ext3/ioctl.c linux-3.0.7/fs/ext3/ioctl.c
41505 --- linux-3.0.7/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41506 +++ linux-3.0.7/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41507 @@ -285,7 +285,7 @@ group_add_out:
41508 if (!capable(CAP_SYS_ADMIN))
41509 return -EPERM;
41510
41511 - if (copy_from_user(&range, (struct fstrim_range *)arg,
41512 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41513 sizeof(range)))
41514 return -EFAULT;
41515
41516 @@ -293,7 +293,7 @@ group_add_out:
41517 if (ret < 0)
41518 return ret;
41519
41520 - if (copy_to_user((struct fstrim_range *)arg, &range,
41521 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
41522 sizeof(range)))
41523 return -EFAULT;
41524
41525 diff -urNp linux-3.0.7/fs/ext4/balloc.c linux-3.0.7/fs/ext4/balloc.c
41526 --- linux-3.0.7/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
41527 +++ linux-3.0.7/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
41528 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
41529 /* Hm, nope. Are (enough) root reserved blocks available? */
41530 if (sbi->s_resuid == current_fsuid() ||
41531 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41532 - capable(CAP_SYS_RESOURCE) ||
41533 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
41534 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
41535 + capable_nolog(CAP_SYS_RESOURCE)) {
41536
41537 if (free_blocks >= (nblocks + dirty_blocks))
41538 return 1;
41539 diff -urNp linux-3.0.7/fs/ext4/ext4.h linux-3.0.7/fs/ext4/ext4.h
41540 --- linux-3.0.7/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
41541 +++ linux-3.0.7/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
41542 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
41543 unsigned long s_mb_last_start;
41544
41545 /* stats for buddy allocator */
41546 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41547 - atomic_t s_bal_success; /* we found long enough chunks */
41548 - atomic_t s_bal_allocated; /* in blocks */
41549 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41550 - atomic_t s_bal_goals; /* goal hits */
41551 - atomic_t s_bal_breaks; /* too long searches */
41552 - atomic_t s_bal_2orders; /* 2^order hits */
41553 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41554 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41555 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41556 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41557 + atomic_unchecked_t s_bal_goals; /* goal hits */
41558 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41559 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41560 spinlock_t s_bal_lock;
41561 unsigned long s_mb_buddies_generated;
41562 unsigned long long s_mb_generation_time;
41563 - atomic_t s_mb_lost_chunks;
41564 - atomic_t s_mb_preallocated;
41565 - atomic_t s_mb_discarded;
41566 + atomic_unchecked_t s_mb_lost_chunks;
41567 + atomic_unchecked_t s_mb_preallocated;
41568 + atomic_unchecked_t s_mb_discarded;
41569 atomic_t s_lock_busy;
41570
41571 /* locality groups */
41572 diff -urNp linux-3.0.7/fs/ext4/file.c linux-3.0.7/fs/ext4/file.c
41573 --- linux-3.0.7/fs/ext4/file.c 2011-07-21 22:17:23.000000000 -0400
41574 +++ linux-3.0.7/fs/ext4/file.c 2011-10-17 02:30:30.000000000 -0400
41575 @@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
41576 path.dentry = mnt->mnt_root;
41577 cp = d_path(&path, buf, sizeof(buf));
41578 if (!IS_ERR(cp)) {
41579 - memcpy(sbi->s_es->s_last_mounted, cp,
41580 - sizeof(sbi->s_es->s_last_mounted));
41581 + strlcpy(sbi->s_es->s_last_mounted, cp,
41582 + sizeof(sbi->s_es->s_last_mounted));
41583 ext4_mark_super_dirty(sb);
41584 }
41585 }
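
The ext4_file_open() hunk replaces memcpy(dest, cp, sizeof(dest)) with strlcpy(). The difference matters because cp points at a path string that is usually shorter than s_last_mounted: the memcpy always read sizeof(dest) bytes from the source, walking past the end of the string, whereas strlcpy stops at the NUL and also guarantees the destination is terminated. strlcpy is a BSD/kernel function rather than ISO C, so the sketch below carries its own minimal equivalent:

#include <stdio.h>
#include <string.h>

/* Local stand-in for strlcpy(): bounded copy, always NUL-terminated. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;	/* full source length, useful for truncation detection */
}

int main(void)
{
	char last_mounted[64];
	const char *cp = "/mnt/data";

	my_strlcpy(last_mounted, cp, sizeof(last_mounted));
	printf("%s\n", last_mounted);

	/* memcpy(last_mounted, cp, sizeof(last_mounted)) would have read
	 * 64 bytes from a 10-byte string. */
	return 0;
}
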
41586 diff -urNp linux-3.0.7/fs/ext4/ioctl.c linux-3.0.7/fs/ext4/ioctl.c
41587 --- linux-3.0.7/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41588 +++ linux-3.0.7/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41589 @@ -344,7 +344,7 @@ mext_out:
41590 if (!blk_queue_discard(q))
41591 return -EOPNOTSUPP;
41592
41593 - if (copy_from_user(&range, (struct fstrim_range *)arg,
41594 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41595 sizeof(range)))
41596 return -EFAULT;
41597
41598 @@ -354,7 +354,7 @@ mext_out:
41599 if (ret < 0)
41600 return ret;
41601
41602 - if (copy_to_user((struct fstrim_range *)arg, &range,
41603 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
41604 sizeof(range)))
41605 return -EFAULT;
41606
41607 diff -urNp linux-3.0.7/fs/ext4/mballoc.c linux-3.0.7/fs/ext4/mballoc.c
41608 --- linux-3.0.7/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
41609 +++ linux-3.0.7/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
41610 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
41611 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41612
41613 if (EXT4_SB(sb)->s_mb_stats)
41614 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41615 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41616
41617 break;
41618 }
41619 @@ -2087,7 +2087,7 @@ repeat:
41620 ac->ac_status = AC_STATUS_CONTINUE;
41621 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41622 cr = 3;
41623 - atomic_inc(&sbi->s_mb_lost_chunks);
41624 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41625 goto repeat;
41626 }
41627 }
41628 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
41629 ext4_grpblk_t counters[16];
41630 } sg;
41631
41632 + pax_track_stack();
41633 +
41634 group--;
41635 if (group == 0)
41636 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41637 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
41638 if (sbi->s_mb_stats) {
41639 printk(KERN_INFO
41640 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41641 - atomic_read(&sbi->s_bal_allocated),
41642 - atomic_read(&sbi->s_bal_reqs),
41643 - atomic_read(&sbi->s_bal_success));
41644 + atomic_read_unchecked(&sbi->s_bal_allocated),
41645 + atomic_read_unchecked(&sbi->s_bal_reqs),
41646 + atomic_read_unchecked(&sbi->s_bal_success));
41647 printk(KERN_INFO
41648 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41649 "%u 2^N hits, %u breaks, %u lost\n",
41650 - atomic_read(&sbi->s_bal_ex_scanned),
41651 - atomic_read(&sbi->s_bal_goals),
41652 - atomic_read(&sbi->s_bal_2orders),
41653 - atomic_read(&sbi->s_bal_breaks),
41654 - atomic_read(&sbi->s_mb_lost_chunks));
41655 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41656 + atomic_read_unchecked(&sbi->s_bal_goals),
41657 + atomic_read_unchecked(&sbi->s_bal_2orders),
41658 + atomic_read_unchecked(&sbi->s_bal_breaks),
41659 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41660 printk(KERN_INFO
41661 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41662 sbi->s_mb_buddies_generated++,
41663 sbi->s_mb_generation_time);
41664 printk(KERN_INFO
41665 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41666 - atomic_read(&sbi->s_mb_preallocated),
41667 - atomic_read(&sbi->s_mb_discarded));
41668 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41669 + atomic_read_unchecked(&sbi->s_mb_discarded));
41670 }
41671
41672 free_percpu(sbi->s_locality_groups);
41673 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
41674 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41675
41676 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41677 - atomic_inc(&sbi->s_bal_reqs);
41678 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41679 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41680 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41681 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
41682 - atomic_inc(&sbi->s_bal_success);
41683 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41684 + atomic_inc_unchecked(&sbi->s_bal_success);
41685 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41686 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41687 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41688 - atomic_inc(&sbi->s_bal_goals);
41689 + atomic_inc_unchecked(&sbi->s_bal_goals);
41690 if (ac->ac_found > sbi->s_mb_max_to_scan)
41691 - atomic_inc(&sbi->s_bal_breaks);
41692 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41693 }
41694
41695 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41696 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41697 trace_ext4_mb_new_inode_pa(ac, pa);
41698
41699 ext4_mb_use_inode_pa(ac, pa);
41700 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41701 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41702
41703 ei = EXT4_I(ac->ac_inode);
41704 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41705 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41706 trace_ext4_mb_new_group_pa(ac, pa);
41707
41708 ext4_mb_use_group_pa(ac, pa);
41709 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41710 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41711
41712 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41713 lg = ac->ac_lg;
41714 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41715 * from the bitmap and continue.
41716 */
41717 }
41718 - atomic_add(free, &sbi->s_mb_discarded);
41719 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41720
41721 return err;
41722 }
41723 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41724 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41725 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41726 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41727 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41728 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41729 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
41730
41731 return 0;
41732 diff -urNp linux-3.0.7/fs/fcntl.c linux-3.0.7/fs/fcntl.c
41733 --- linux-3.0.7/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
41734 +++ linux-3.0.7/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
41735 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
41736 if (err)
41737 return err;
41738
41739 + if (gr_handle_chroot_fowner(pid, type))
41740 + return -ENOENT;
41741 + if (gr_check_protected_task_fowner(pid, type))
41742 + return -EACCES;
41743 +
41744 f_modown(filp, pid, type, force);
41745 return 0;
41746 }
41747 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
41748
41749 static int f_setown_ex(struct file *filp, unsigned long arg)
41750 {
41751 - struct f_owner_ex * __user owner_p = (void * __user)arg;
41752 + struct f_owner_ex __user *owner_p = (void __user *)arg;
41753 struct f_owner_ex owner;
41754 struct pid *pid;
41755 int type;
41756 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
41757
41758 static int f_getown_ex(struct file *filp, unsigned long arg)
41759 {
41760 - struct f_owner_ex * __user owner_p = (void * __user)arg;
41761 + struct f_owner_ex __user *owner_p = (void __user *)arg;
41762 struct f_owner_ex owner;
41763 int ret = 0;
41764
41765 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
41766 switch (cmd) {
41767 case F_DUPFD:
41768 case F_DUPFD_CLOEXEC:
41769 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41770 if (arg >= rlimit(RLIMIT_NOFILE))
41771 break;
41772 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41773 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
41774 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
41775 * is defined as O_NONBLOCK on some platforms and not on others.
41776 */
41777 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41778 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41779 O_RDONLY | O_WRONLY | O_RDWR |
41780 O_CREAT | O_EXCL | O_NOCTTY |
41781 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
41782 __O_SYNC | O_DSYNC | FASYNC |
41783 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
41784 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
41785 - __FMODE_EXEC | O_PATH
41786 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
41787 ));
41788
41789 fasync_cache = kmem_cache_create("fasync_cache",
41790 diff -urNp linux-3.0.7/fs/fifo.c linux-3.0.7/fs/fifo.c
41791 --- linux-3.0.7/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
41792 +++ linux-3.0.7/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
41793 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
41794 */
41795 filp->f_op = &read_pipefifo_fops;
41796 pipe->r_counter++;
41797 - if (pipe->readers++ == 0)
41798 + if (atomic_inc_return(&pipe->readers) == 1)
41799 wake_up_partner(inode);
41800
41801 - if (!pipe->writers) {
41802 + if (!atomic_read(&pipe->writers)) {
41803 if ((filp->f_flags & O_NONBLOCK)) {
41804 /* suppress POLLHUP until we have
41805 * seen a writer */
41806 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
41807 * errno=ENXIO when there is no process reading the FIFO.
41808 */
41809 ret = -ENXIO;
41810 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41811 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41812 goto err;
41813
41814 filp->f_op = &write_pipefifo_fops;
41815 pipe->w_counter++;
41816 - if (!pipe->writers++)
41817 + if (atomic_inc_return(&pipe->writers) == 1)
41818 wake_up_partner(inode);
41819
41820 - if (!pipe->readers) {
41821 + if (!atomic_read(&pipe->readers)) {
41822 wait_for_partner(inode, &pipe->r_counter);
41823 if (signal_pending(current))
41824 goto err_wr;
41825 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
41826 */
41827 filp->f_op = &rdwr_pipefifo_fops;
41828
41829 - pipe->readers++;
41830 - pipe->writers++;
41831 + atomic_inc(&pipe->readers);
41832 + atomic_inc(&pipe->writers);
41833 pipe->r_counter++;
41834 pipe->w_counter++;
41835 - if (pipe->readers == 1 || pipe->writers == 1)
41836 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41837 wake_up_partner(inode);
41838 break;
41839
41840 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
41841 return 0;
41842
41843 err_rd:
41844 - if (!--pipe->readers)
41845 + if (atomic_dec_and_test(&pipe->readers))
41846 wake_up_interruptible(&pipe->wait);
41847 ret = -ERESTARTSYS;
41848 goto err;
41849
41850 err_wr:
41851 - if (!--pipe->writers)
41852 + if (atomic_dec_and_test(&pipe->writers))
41853 wake_up_interruptible(&pipe->wait);
41854 ret = -ERESTARTSYS;
41855 goto err;
41856
41857 err:
41858 - if (!pipe->readers && !pipe->writers)
41859 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41860 free_pipe_info(inode);
41861
41862 err_nocleanup:
41863 diff -urNp linux-3.0.7/fs/file.c linux-3.0.7/fs/file.c
41864 --- linux-3.0.7/fs/file.c 2011-07-21 22:17:23.000000000 -0400
41865 +++ linux-3.0.7/fs/file.c 2011-08-23 21:48:14.000000000 -0400
41866 @@ -15,6 +15,7 @@
41867 #include <linux/slab.h>
41868 #include <linux/vmalloc.h>
41869 #include <linux/file.h>
41870 +#include <linux/security.h>
41871 #include <linux/fdtable.h>
41872 #include <linux/bitops.h>
41873 #include <linux/interrupt.h>
41874 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
41875 * N.B. For clone tasks sharing a files structure, this test
41876 * will limit the total number of files that can be opened.
41877 */
41878 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41879 if (nr >= rlimit(RLIMIT_NOFILE))
41880 return -EMFILE;
41881
41882 diff -urNp linux-3.0.7/fs/filesystems.c linux-3.0.7/fs/filesystems.c
41883 --- linux-3.0.7/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
41884 +++ linux-3.0.7/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
41885 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
41886 int len = dot ? dot - name : strlen(name);
41887
41888 fs = __get_fs_type(name, len);
41889 +
41890 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41891 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41892 +#else
41893 if (!fs && (request_module("%.*s", len, name) == 0))
41894 +#endif
41895 fs = __get_fs_type(name, len);
41896
41897 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41898 diff -urNp linux-3.0.7/fs/fscache/cookie.c linux-3.0.7/fs/fscache/cookie.c
41899 --- linux-3.0.7/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
41900 +++ linux-3.0.7/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
41901 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41902 parent ? (char *) parent->def->name : "<no-parent>",
41903 def->name, netfs_data);
41904
41905 - fscache_stat(&fscache_n_acquires);
41906 + fscache_stat_unchecked(&fscache_n_acquires);
41907
41908 /* if there's no parent cookie, then we don't create one here either */
41909 if (!parent) {
41910 - fscache_stat(&fscache_n_acquires_null);
41911 + fscache_stat_unchecked(&fscache_n_acquires_null);
41912 _leave(" [no parent]");
41913 return NULL;
41914 }
41915 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41916 /* allocate and initialise a cookie */
41917 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41918 if (!cookie) {
41919 - fscache_stat(&fscache_n_acquires_oom);
41920 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41921 _leave(" [ENOMEM]");
41922 return NULL;
41923 }
41924 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41925
41926 switch (cookie->def->type) {
41927 case FSCACHE_COOKIE_TYPE_INDEX:
41928 - fscache_stat(&fscache_n_cookie_index);
41929 + fscache_stat_unchecked(&fscache_n_cookie_index);
41930 break;
41931 case FSCACHE_COOKIE_TYPE_DATAFILE:
41932 - fscache_stat(&fscache_n_cookie_data);
41933 + fscache_stat_unchecked(&fscache_n_cookie_data);
41934 break;
41935 default:
41936 - fscache_stat(&fscache_n_cookie_special);
41937 + fscache_stat_unchecked(&fscache_n_cookie_special);
41938 break;
41939 }
41940
41941 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41942 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41943 atomic_dec(&parent->n_children);
41944 __fscache_cookie_put(cookie);
41945 - fscache_stat(&fscache_n_acquires_nobufs);
41946 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41947 _leave(" = NULL");
41948 return NULL;
41949 }
41950 }
41951
41952 - fscache_stat(&fscache_n_acquires_ok);
41953 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41954 _leave(" = %p", cookie);
41955 return cookie;
41956 }
41957 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41958 cache = fscache_select_cache_for_object(cookie->parent);
41959 if (!cache) {
41960 up_read(&fscache_addremove_sem);
41961 - fscache_stat(&fscache_n_acquires_no_cache);
41962 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41963 _leave(" = -ENOMEDIUM [no cache]");
41964 return -ENOMEDIUM;
41965 }
41966 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41967 object = cache->ops->alloc_object(cache, cookie);
41968 fscache_stat_d(&fscache_n_cop_alloc_object);
41969 if (IS_ERR(object)) {
41970 - fscache_stat(&fscache_n_object_no_alloc);
41971 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41972 ret = PTR_ERR(object);
41973 goto error;
41974 }
41975
41976 - fscache_stat(&fscache_n_object_alloc);
41977 + fscache_stat_unchecked(&fscache_n_object_alloc);
41978
41979 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41980
41981 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41982 struct fscache_object *object;
41983 struct hlist_node *_p;
41984
41985 - fscache_stat(&fscache_n_updates);
41986 + fscache_stat_unchecked(&fscache_n_updates);
41987
41988 if (!cookie) {
41989 - fscache_stat(&fscache_n_updates_null);
41990 + fscache_stat_unchecked(&fscache_n_updates_null);
41991 _leave(" [no cookie]");
41992 return;
41993 }
41994 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41995 struct fscache_object *object;
41996 unsigned long event;
41997
41998 - fscache_stat(&fscache_n_relinquishes);
41999 + fscache_stat_unchecked(&fscache_n_relinquishes);
42000 if (retire)
42001 - fscache_stat(&fscache_n_relinquishes_retire);
42002 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42003
42004 if (!cookie) {
42005 - fscache_stat(&fscache_n_relinquishes_null);
42006 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42007 _leave(" [no cookie]");
42008 return;
42009 }
42010 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42011
42012 /* wait for the cookie to finish being instantiated (or to fail) */
42013 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42014 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42015 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42016 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42017 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42018 }
42019 diff -urNp linux-3.0.7/fs/fscache/internal.h linux-3.0.7/fs/fscache/internal.h
42020 --- linux-3.0.7/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
42021 +++ linux-3.0.7/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
42022 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42023 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42024 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42025
42026 -extern atomic_t fscache_n_op_pend;
42027 -extern atomic_t fscache_n_op_run;
42028 -extern atomic_t fscache_n_op_enqueue;
42029 -extern atomic_t fscache_n_op_deferred_release;
42030 -extern atomic_t fscache_n_op_release;
42031 -extern atomic_t fscache_n_op_gc;
42032 -extern atomic_t fscache_n_op_cancelled;
42033 -extern atomic_t fscache_n_op_rejected;
42034 -
42035 -extern atomic_t fscache_n_attr_changed;
42036 -extern atomic_t fscache_n_attr_changed_ok;
42037 -extern atomic_t fscache_n_attr_changed_nobufs;
42038 -extern atomic_t fscache_n_attr_changed_nomem;
42039 -extern atomic_t fscache_n_attr_changed_calls;
42040 -
42041 -extern atomic_t fscache_n_allocs;
42042 -extern atomic_t fscache_n_allocs_ok;
42043 -extern atomic_t fscache_n_allocs_wait;
42044 -extern atomic_t fscache_n_allocs_nobufs;
42045 -extern atomic_t fscache_n_allocs_intr;
42046 -extern atomic_t fscache_n_allocs_object_dead;
42047 -extern atomic_t fscache_n_alloc_ops;
42048 -extern atomic_t fscache_n_alloc_op_waits;
42049 -
42050 -extern atomic_t fscache_n_retrievals;
42051 -extern atomic_t fscache_n_retrievals_ok;
42052 -extern atomic_t fscache_n_retrievals_wait;
42053 -extern atomic_t fscache_n_retrievals_nodata;
42054 -extern atomic_t fscache_n_retrievals_nobufs;
42055 -extern atomic_t fscache_n_retrievals_intr;
42056 -extern atomic_t fscache_n_retrievals_nomem;
42057 -extern atomic_t fscache_n_retrievals_object_dead;
42058 -extern atomic_t fscache_n_retrieval_ops;
42059 -extern atomic_t fscache_n_retrieval_op_waits;
42060 -
42061 -extern atomic_t fscache_n_stores;
42062 -extern atomic_t fscache_n_stores_ok;
42063 -extern atomic_t fscache_n_stores_again;
42064 -extern atomic_t fscache_n_stores_nobufs;
42065 -extern atomic_t fscache_n_stores_oom;
42066 -extern atomic_t fscache_n_store_ops;
42067 -extern atomic_t fscache_n_store_calls;
42068 -extern atomic_t fscache_n_store_pages;
42069 -extern atomic_t fscache_n_store_radix_deletes;
42070 -extern atomic_t fscache_n_store_pages_over_limit;
42071 -
42072 -extern atomic_t fscache_n_store_vmscan_not_storing;
42073 -extern atomic_t fscache_n_store_vmscan_gone;
42074 -extern atomic_t fscache_n_store_vmscan_busy;
42075 -extern atomic_t fscache_n_store_vmscan_cancelled;
42076 -
42077 -extern atomic_t fscache_n_marks;
42078 -extern atomic_t fscache_n_uncaches;
42079 -
42080 -extern atomic_t fscache_n_acquires;
42081 -extern atomic_t fscache_n_acquires_null;
42082 -extern atomic_t fscache_n_acquires_no_cache;
42083 -extern atomic_t fscache_n_acquires_ok;
42084 -extern atomic_t fscache_n_acquires_nobufs;
42085 -extern atomic_t fscache_n_acquires_oom;
42086 -
42087 -extern atomic_t fscache_n_updates;
42088 -extern atomic_t fscache_n_updates_null;
42089 -extern atomic_t fscache_n_updates_run;
42090 -
42091 -extern atomic_t fscache_n_relinquishes;
42092 -extern atomic_t fscache_n_relinquishes_null;
42093 -extern atomic_t fscache_n_relinquishes_waitcrt;
42094 -extern atomic_t fscache_n_relinquishes_retire;
42095 -
42096 -extern atomic_t fscache_n_cookie_index;
42097 -extern atomic_t fscache_n_cookie_data;
42098 -extern atomic_t fscache_n_cookie_special;
42099 -
42100 -extern atomic_t fscache_n_object_alloc;
42101 -extern atomic_t fscache_n_object_no_alloc;
42102 -extern atomic_t fscache_n_object_lookups;
42103 -extern atomic_t fscache_n_object_lookups_negative;
42104 -extern atomic_t fscache_n_object_lookups_positive;
42105 -extern atomic_t fscache_n_object_lookups_timed_out;
42106 -extern atomic_t fscache_n_object_created;
42107 -extern atomic_t fscache_n_object_avail;
42108 -extern atomic_t fscache_n_object_dead;
42109 -
42110 -extern atomic_t fscache_n_checkaux_none;
42111 -extern atomic_t fscache_n_checkaux_okay;
42112 -extern atomic_t fscache_n_checkaux_update;
42113 -extern atomic_t fscache_n_checkaux_obsolete;
42114 +extern atomic_unchecked_t fscache_n_op_pend;
42115 +extern atomic_unchecked_t fscache_n_op_run;
42116 +extern atomic_unchecked_t fscache_n_op_enqueue;
42117 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42118 +extern atomic_unchecked_t fscache_n_op_release;
42119 +extern atomic_unchecked_t fscache_n_op_gc;
42120 +extern atomic_unchecked_t fscache_n_op_cancelled;
42121 +extern atomic_unchecked_t fscache_n_op_rejected;
42122 +
42123 +extern atomic_unchecked_t fscache_n_attr_changed;
42124 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42125 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42126 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42127 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42128 +
42129 +extern atomic_unchecked_t fscache_n_allocs;
42130 +extern atomic_unchecked_t fscache_n_allocs_ok;
42131 +extern atomic_unchecked_t fscache_n_allocs_wait;
42132 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42133 +extern atomic_unchecked_t fscache_n_allocs_intr;
42134 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42135 +extern atomic_unchecked_t fscache_n_alloc_ops;
42136 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42137 +
42138 +extern atomic_unchecked_t fscache_n_retrievals;
42139 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42140 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42141 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42142 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42143 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42144 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42145 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42146 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42147 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42148 +
42149 +extern atomic_unchecked_t fscache_n_stores;
42150 +extern atomic_unchecked_t fscache_n_stores_ok;
42151 +extern atomic_unchecked_t fscache_n_stores_again;
42152 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42153 +extern atomic_unchecked_t fscache_n_stores_oom;
42154 +extern atomic_unchecked_t fscache_n_store_ops;
42155 +extern atomic_unchecked_t fscache_n_store_calls;
42156 +extern atomic_unchecked_t fscache_n_store_pages;
42157 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42158 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42159 +
42160 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42161 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42162 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42163 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42164 +
42165 +extern atomic_unchecked_t fscache_n_marks;
42166 +extern atomic_unchecked_t fscache_n_uncaches;
42167 +
42168 +extern atomic_unchecked_t fscache_n_acquires;
42169 +extern atomic_unchecked_t fscache_n_acquires_null;
42170 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42171 +extern atomic_unchecked_t fscache_n_acquires_ok;
42172 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42173 +extern atomic_unchecked_t fscache_n_acquires_oom;
42174 +
42175 +extern atomic_unchecked_t fscache_n_updates;
42176 +extern atomic_unchecked_t fscache_n_updates_null;
42177 +extern atomic_unchecked_t fscache_n_updates_run;
42178 +
42179 +extern atomic_unchecked_t fscache_n_relinquishes;
42180 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42181 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42182 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42183 +
42184 +extern atomic_unchecked_t fscache_n_cookie_index;
42185 +extern atomic_unchecked_t fscache_n_cookie_data;
42186 +extern atomic_unchecked_t fscache_n_cookie_special;
42187 +
42188 +extern atomic_unchecked_t fscache_n_object_alloc;
42189 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42190 +extern atomic_unchecked_t fscache_n_object_lookups;
42191 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42192 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42193 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42194 +extern atomic_unchecked_t fscache_n_object_created;
42195 +extern atomic_unchecked_t fscache_n_object_avail;
42196 +extern atomic_unchecked_t fscache_n_object_dead;
42197 +
42198 +extern atomic_unchecked_t fscache_n_checkaux_none;
42199 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42200 +extern atomic_unchecked_t fscache_n_checkaux_update;
42201 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42202
42203 extern atomic_t fscache_n_cop_alloc_object;
42204 extern atomic_t fscache_n_cop_lookup_object;
42205 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42206 atomic_inc(stat);
42207 }
42208
42209 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42210 +{
42211 + atomic_inc_unchecked(stat);
42212 +}
42213 +
42214 static inline void fscache_stat_d(atomic_t *stat)
42215 {
42216 atomic_dec(stat);
42217 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
42218
42219 #define __fscache_stat(stat) (NULL)
42220 #define fscache_stat(stat) do {} while (0)
42221 +#define fscache_stat_unchecked(stat) do {} while (0)
42222 #define fscache_stat_d(stat) do {} while (0)
42223 #endif
42224
42225 diff -urNp linux-3.0.7/fs/fscache/object.c linux-3.0.7/fs/fscache/object.c
42226 --- linux-3.0.7/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
42227 +++ linux-3.0.7/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
42228 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
42229 /* update the object metadata on disk */
42230 case FSCACHE_OBJECT_UPDATING:
42231 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42232 - fscache_stat(&fscache_n_updates_run);
42233 + fscache_stat_unchecked(&fscache_n_updates_run);
42234 fscache_stat(&fscache_n_cop_update_object);
42235 object->cache->ops->update_object(object);
42236 fscache_stat_d(&fscache_n_cop_update_object);
42237 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
42238 spin_lock(&object->lock);
42239 object->state = FSCACHE_OBJECT_DEAD;
42240 spin_unlock(&object->lock);
42241 - fscache_stat(&fscache_n_object_dead);
42242 + fscache_stat_unchecked(&fscache_n_object_dead);
42243 goto terminal_transit;
42244
42245 /* handle the parent cache of this object being withdrawn from
42246 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
42247 spin_lock(&object->lock);
42248 object->state = FSCACHE_OBJECT_DEAD;
42249 spin_unlock(&object->lock);
42250 - fscache_stat(&fscache_n_object_dead);
42251 + fscache_stat_unchecked(&fscache_n_object_dead);
42252 goto terminal_transit;
42253
42254 /* complain about the object being woken up once it is
42255 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42256 parent->cookie->def->name, cookie->def->name,
42257 object->cache->tag->name);
42258
42259 - fscache_stat(&fscache_n_object_lookups);
42260 + fscache_stat_unchecked(&fscache_n_object_lookups);
42261 fscache_stat(&fscache_n_cop_lookup_object);
42262 ret = object->cache->ops->lookup_object(object);
42263 fscache_stat_d(&fscache_n_cop_lookup_object);
42264 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42265 if (ret == -ETIMEDOUT) {
42266 /* probably stuck behind another object, so move this one to
42267 * the back of the queue */
42268 - fscache_stat(&fscache_n_object_lookups_timed_out);
42269 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42270 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42271 }
42272
42273 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42274
42275 spin_lock(&object->lock);
42276 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42277 - fscache_stat(&fscache_n_object_lookups_negative);
42278 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42279
42280 /* transit here to allow write requests to begin stacking up
42281 * and read requests to begin returning ENODATA */
42282 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42283 * result, in which case there may be data available */
42284 spin_lock(&object->lock);
42285 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42286 - fscache_stat(&fscache_n_object_lookups_positive);
42287 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42288
42289 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42290
42291 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42292 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42293 } else {
42294 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42295 - fscache_stat(&fscache_n_object_created);
42296 + fscache_stat_unchecked(&fscache_n_object_created);
42297
42298 object->state = FSCACHE_OBJECT_AVAILABLE;
42299 spin_unlock(&object->lock);
42300 @@ -602,7 +602,7 @@ static void fscache_object_available(str
42301 fscache_enqueue_dependents(object);
42302
42303 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42304 - fscache_stat(&fscache_n_object_avail);
42305 + fscache_stat_unchecked(&fscache_n_object_avail);
42306
42307 _leave("");
42308 }
42309 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42310 enum fscache_checkaux result;
42311
42312 if (!object->cookie->def->check_aux) {
42313 - fscache_stat(&fscache_n_checkaux_none);
42314 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42315 return FSCACHE_CHECKAUX_OKAY;
42316 }
42317
42318 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42319 switch (result) {
42320 /* entry okay as is */
42321 case FSCACHE_CHECKAUX_OKAY:
42322 - fscache_stat(&fscache_n_checkaux_okay);
42323 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42324 break;
42325
42326 /* entry requires update */
42327 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42328 - fscache_stat(&fscache_n_checkaux_update);
42329 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42330 break;
42331
42332 /* entry requires deletion */
42333 case FSCACHE_CHECKAUX_OBSOLETE:
42334 - fscache_stat(&fscache_n_checkaux_obsolete);
42335 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42336 break;
42337
42338 default:
42339 diff -urNp linux-3.0.7/fs/fscache/operation.c linux-3.0.7/fs/fscache/operation.c
42340 --- linux-3.0.7/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
42341 +++ linux-3.0.7/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
42342 @@ -17,7 +17,7 @@
42343 #include <linux/slab.h>
42344 #include "internal.h"
42345
42346 -atomic_t fscache_op_debug_id;
42347 +atomic_unchecked_t fscache_op_debug_id;
42348 EXPORT_SYMBOL(fscache_op_debug_id);
42349
42350 /**
42351 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42352 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42353 ASSERTCMP(atomic_read(&op->usage), >, 0);
42354
42355 - fscache_stat(&fscache_n_op_enqueue);
42356 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42357 switch (op->flags & FSCACHE_OP_TYPE) {
42358 case FSCACHE_OP_ASYNC:
42359 _debug("queue async");
42360 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42361 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42362 if (op->processor)
42363 fscache_enqueue_operation(op);
42364 - fscache_stat(&fscache_n_op_run);
42365 + fscache_stat_unchecked(&fscache_n_op_run);
42366 }
42367
42368 /*
42369 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42370 if (object->n_ops > 1) {
42371 atomic_inc(&op->usage);
42372 list_add_tail(&op->pend_link, &object->pending_ops);
42373 - fscache_stat(&fscache_n_op_pend);
42374 + fscache_stat_unchecked(&fscache_n_op_pend);
42375 } else if (!list_empty(&object->pending_ops)) {
42376 atomic_inc(&op->usage);
42377 list_add_tail(&op->pend_link, &object->pending_ops);
42378 - fscache_stat(&fscache_n_op_pend);
42379 + fscache_stat_unchecked(&fscache_n_op_pend);
42380 fscache_start_operations(object);
42381 } else {
42382 ASSERTCMP(object->n_in_progress, ==, 0);
42383 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42384 object->n_exclusive++; /* reads and writes must wait */
42385 atomic_inc(&op->usage);
42386 list_add_tail(&op->pend_link, &object->pending_ops);
42387 - fscache_stat(&fscache_n_op_pend);
42388 + fscache_stat_unchecked(&fscache_n_op_pend);
42389 ret = 0;
42390 } else {
42391 /* not allowed to submit ops in any other state */
42392 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42393 if (object->n_exclusive > 0) {
42394 atomic_inc(&op->usage);
42395 list_add_tail(&op->pend_link, &object->pending_ops);
42396 - fscache_stat(&fscache_n_op_pend);
42397 + fscache_stat_unchecked(&fscache_n_op_pend);
42398 } else if (!list_empty(&object->pending_ops)) {
42399 atomic_inc(&op->usage);
42400 list_add_tail(&op->pend_link, &object->pending_ops);
42401 - fscache_stat(&fscache_n_op_pend);
42402 + fscache_stat_unchecked(&fscache_n_op_pend);
42403 fscache_start_operations(object);
42404 } else {
42405 ASSERTCMP(object->n_exclusive, ==, 0);
42406 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42407 object->n_ops++;
42408 atomic_inc(&op->usage);
42409 list_add_tail(&op->pend_link, &object->pending_ops);
42410 - fscache_stat(&fscache_n_op_pend);
42411 + fscache_stat_unchecked(&fscache_n_op_pend);
42412 ret = 0;
42413 } else if (object->state == FSCACHE_OBJECT_DYING ||
42414 object->state == FSCACHE_OBJECT_LC_DYING ||
42415 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42416 - fscache_stat(&fscache_n_op_rejected);
42417 + fscache_stat_unchecked(&fscache_n_op_rejected);
42418 ret = -ENOBUFS;
42419 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42420 fscache_report_unexpected_submission(object, op, ostate);
42421 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42422
42423 ret = -EBUSY;
42424 if (!list_empty(&op->pend_link)) {
42425 - fscache_stat(&fscache_n_op_cancelled);
42426 + fscache_stat_unchecked(&fscache_n_op_cancelled);
42427 list_del_init(&op->pend_link);
42428 object->n_ops--;
42429 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42430 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42431 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42432 BUG();
42433
42434 - fscache_stat(&fscache_n_op_release);
42435 + fscache_stat_unchecked(&fscache_n_op_release);
42436
42437 if (op->release) {
42438 op->release(op);
42439 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42440 * lock, and defer it otherwise */
42441 if (!spin_trylock(&object->lock)) {
42442 _debug("defer put");
42443 - fscache_stat(&fscache_n_op_deferred_release);
42444 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42445
42446 cache = object->cache;
42447 spin_lock(&cache->op_gc_list_lock);
42448 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42449
42450 _debug("GC DEFERRED REL OBJ%x OP%x",
42451 object->debug_id, op->debug_id);
42452 - fscache_stat(&fscache_n_op_gc);
42453 + fscache_stat_unchecked(&fscache_n_op_gc);
42454
42455 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42456
42457 diff -urNp linux-3.0.7/fs/fscache/page.c linux-3.0.7/fs/fscache/page.c
42458 --- linux-3.0.7/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
42459 +++ linux-3.0.7/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
42460 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42461 val = radix_tree_lookup(&cookie->stores, page->index);
42462 if (!val) {
42463 rcu_read_unlock();
42464 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42465 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42466 __fscache_uncache_page(cookie, page);
42467 return true;
42468 }
42469 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42470 spin_unlock(&cookie->stores_lock);
42471
42472 if (xpage) {
42473 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42474 - fscache_stat(&fscache_n_store_radix_deletes);
42475 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42476 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42477 ASSERTCMP(xpage, ==, page);
42478 } else {
42479 - fscache_stat(&fscache_n_store_vmscan_gone);
42480 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42481 }
42482
42483 wake_up_bit(&cookie->flags, 0);
42484 @@ -107,7 +107,7 @@ page_busy:
42485 /* we might want to wait here, but that could deadlock the allocator as
42486 * the work threads writing to the cache may all end up sleeping
42487 * on memory allocation */
42488 - fscache_stat(&fscache_n_store_vmscan_busy);
42489 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42490 return false;
42491 }
42492 EXPORT_SYMBOL(__fscache_maybe_release_page);
42493 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42494 FSCACHE_COOKIE_STORING_TAG);
42495 if (!radix_tree_tag_get(&cookie->stores, page->index,
42496 FSCACHE_COOKIE_PENDING_TAG)) {
42497 - fscache_stat(&fscache_n_store_radix_deletes);
42498 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42499 xpage = radix_tree_delete(&cookie->stores, page->index);
42500 }
42501 spin_unlock(&cookie->stores_lock);
42502 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42503
42504 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42505
42506 - fscache_stat(&fscache_n_attr_changed_calls);
42507 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42508
42509 if (fscache_object_is_active(object)) {
42510 fscache_stat(&fscache_n_cop_attr_changed);
42511 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42512
42513 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42514
42515 - fscache_stat(&fscache_n_attr_changed);
42516 + fscache_stat_unchecked(&fscache_n_attr_changed);
42517
42518 op = kzalloc(sizeof(*op), GFP_KERNEL);
42519 if (!op) {
42520 - fscache_stat(&fscache_n_attr_changed_nomem);
42521 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42522 _leave(" = -ENOMEM");
42523 return -ENOMEM;
42524 }
42525 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
42526 if (fscache_submit_exclusive_op(object, op) < 0)
42527 goto nobufs;
42528 spin_unlock(&cookie->lock);
42529 - fscache_stat(&fscache_n_attr_changed_ok);
42530 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42531 fscache_put_operation(op);
42532 _leave(" = 0");
42533 return 0;
42534 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
42535 nobufs:
42536 spin_unlock(&cookie->lock);
42537 kfree(op);
42538 - fscache_stat(&fscache_n_attr_changed_nobufs);
42539 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42540 _leave(" = %d", -ENOBUFS);
42541 return -ENOBUFS;
42542 }
42543 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
42544 /* allocate a retrieval operation and attempt to submit it */
42545 op = kzalloc(sizeof(*op), GFP_NOIO);
42546 if (!op) {
42547 - fscache_stat(&fscache_n_retrievals_nomem);
42548 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42549 return NULL;
42550 }
42551
42552 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
42553 return 0;
42554 }
42555
42556 - fscache_stat(&fscache_n_retrievals_wait);
42557 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42558
42559 jif = jiffies;
42560 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42561 fscache_wait_bit_interruptible,
42562 TASK_INTERRUPTIBLE) != 0) {
42563 - fscache_stat(&fscache_n_retrievals_intr);
42564 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42565 _leave(" = -ERESTARTSYS");
42566 return -ERESTARTSYS;
42567 }
42568 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
42569 */
42570 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42571 struct fscache_retrieval *op,
42572 - atomic_t *stat_op_waits,
42573 - atomic_t *stat_object_dead)
42574 + atomic_unchecked_t *stat_op_waits,
42575 + atomic_unchecked_t *stat_object_dead)
42576 {
42577 int ret;
42578
42579 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
42580 goto check_if_dead;
42581
42582 _debug(">>> WT");
42583 - fscache_stat(stat_op_waits);
42584 + fscache_stat_unchecked(stat_op_waits);
42585 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42586 fscache_wait_bit_interruptible,
42587 TASK_INTERRUPTIBLE) < 0) {
42588 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
42589
42590 check_if_dead:
42591 if (unlikely(fscache_object_is_dead(object))) {
42592 - fscache_stat(stat_object_dead);
42593 + fscache_stat_unchecked(stat_object_dead);
42594 return -ENOBUFS;
42595 }
42596 return 0;
42597 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
42598
42599 _enter("%p,%p,,,", cookie, page);
42600
42601 - fscache_stat(&fscache_n_retrievals);
42602 + fscache_stat_unchecked(&fscache_n_retrievals);
42603
42604 if (hlist_empty(&cookie->backing_objects))
42605 goto nobufs;
42606 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
42607 goto nobufs_unlock;
42608 spin_unlock(&cookie->lock);
42609
42610 - fscache_stat(&fscache_n_retrieval_ops);
42611 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42612
42613 /* pin the netfs read context in case we need to do the actual netfs
42614 * read because we've encountered a cache read failure */
42615 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
42616
42617 error:
42618 if (ret == -ENOMEM)
42619 - fscache_stat(&fscache_n_retrievals_nomem);
42620 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42621 else if (ret == -ERESTARTSYS)
42622 - fscache_stat(&fscache_n_retrievals_intr);
42623 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42624 else if (ret == -ENODATA)
42625 - fscache_stat(&fscache_n_retrievals_nodata);
42626 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42627 else if (ret < 0)
42628 - fscache_stat(&fscache_n_retrievals_nobufs);
42629 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42630 else
42631 - fscache_stat(&fscache_n_retrievals_ok);
42632 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42633
42634 fscache_put_retrieval(op);
42635 _leave(" = %d", ret);
42636 @@ -429,7 +429,7 @@ nobufs_unlock:
42637 spin_unlock(&cookie->lock);
42638 kfree(op);
42639 nobufs:
42640 - fscache_stat(&fscache_n_retrievals_nobufs);
42641 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42642 _leave(" = -ENOBUFS");
42643 return -ENOBUFS;
42644 }
42645 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
42646
42647 _enter("%p,,%d,,,", cookie, *nr_pages);
42648
42649 - fscache_stat(&fscache_n_retrievals);
42650 + fscache_stat_unchecked(&fscache_n_retrievals);
42651
42652 if (hlist_empty(&cookie->backing_objects))
42653 goto nobufs;
42654 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
42655 goto nobufs_unlock;
42656 spin_unlock(&cookie->lock);
42657
42658 - fscache_stat(&fscache_n_retrieval_ops);
42659 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42660
42661 /* pin the netfs read context in case we need to do the actual netfs
42662 * read because we've encountered a cache read failure */
42663 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
42664
42665 error:
42666 if (ret == -ENOMEM)
42667 - fscache_stat(&fscache_n_retrievals_nomem);
42668 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42669 else if (ret == -ERESTARTSYS)
42670 - fscache_stat(&fscache_n_retrievals_intr);
42671 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42672 else if (ret == -ENODATA)
42673 - fscache_stat(&fscache_n_retrievals_nodata);
42674 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42675 else if (ret < 0)
42676 - fscache_stat(&fscache_n_retrievals_nobufs);
42677 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42678 else
42679 - fscache_stat(&fscache_n_retrievals_ok);
42680 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42681
42682 fscache_put_retrieval(op);
42683 _leave(" = %d", ret);
42684 @@ -545,7 +545,7 @@ nobufs_unlock:
42685 spin_unlock(&cookie->lock);
42686 kfree(op);
42687 nobufs:
42688 - fscache_stat(&fscache_n_retrievals_nobufs);
42689 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42690 _leave(" = -ENOBUFS");
42691 return -ENOBUFS;
42692 }
42693 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
42694
42695 _enter("%p,%p,,,", cookie, page);
42696
42697 - fscache_stat(&fscache_n_allocs);
42698 + fscache_stat_unchecked(&fscache_n_allocs);
42699
42700 if (hlist_empty(&cookie->backing_objects))
42701 goto nobufs;
42702 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
42703 goto nobufs_unlock;
42704 spin_unlock(&cookie->lock);
42705
42706 - fscache_stat(&fscache_n_alloc_ops);
42707 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42708
42709 ret = fscache_wait_for_retrieval_activation(
42710 object, op,
42711 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
42712
42713 error:
42714 if (ret == -ERESTARTSYS)
42715 - fscache_stat(&fscache_n_allocs_intr);
42716 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42717 else if (ret < 0)
42718 - fscache_stat(&fscache_n_allocs_nobufs);
42719 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42720 else
42721 - fscache_stat(&fscache_n_allocs_ok);
42722 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42723
42724 fscache_put_retrieval(op);
42725 _leave(" = %d", ret);
42726 @@ -625,7 +625,7 @@ nobufs_unlock:
42727 spin_unlock(&cookie->lock);
42728 kfree(op);
42729 nobufs:
42730 - fscache_stat(&fscache_n_allocs_nobufs);
42731 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42732 _leave(" = -ENOBUFS");
42733 return -ENOBUFS;
42734 }
42735 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
42736
42737 spin_lock(&cookie->stores_lock);
42738
42739 - fscache_stat(&fscache_n_store_calls);
42740 + fscache_stat_unchecked(&fscache_n_store_calls);
42741
42742 /* find a page to store */
42743 page = NULL;
42744 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
42745 page = results[0];
42746 _debug("gang %d [%lx]", n, page->index);
42747 if (page->index > op->store_limit) {
42748 - fscache_stat(&fscache_n_store_pages_over_limit);
42749 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42750 goto superseded;
42751 }
42752
42753 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
42754 spin_unlock(&cookie->stores_lock);
42755 spin_unlock(&object->lock);
42756
42757 - fscache_stat(&fscache_n_store_pages);
42758 + fscache_stat_unchecked(&fscache_n_store_pages);
42759 fscache_stat(&fscache_n_cop_write_page);
42760 ret = object->cache->ops->write_page(op, page);
42761 fscache_stat_d(&fscache_n_cop_write_page);
42762 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
42763 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42764 ASSERT(PageFsCache(page));
42765
42766 - fscache_stat(&fscache_n_stores);
42767 + fscache_stat_unchecked(&fscache_n_stores);
42768
42769 op = kzalloc(sizeof(*op), GFP_NOIO);
42770 if (!op)
42771 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
42772 spin_unlock(&cookie->stores_lock);
42773 spin_unlock(&object->lock);
42774
42775 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42776 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42777 op->store_limit = object->store_limit;
42778
42779 if (fscache_submit_op(object, &op->op) < 0)
42780 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
42781
42782 spin_unlock(&cookie->lock);
42783 radix_tree_preload_end();
42784 - fscache_stat(&fscache_n_store_ops);
42785 - fscache_stat(&fscache_n_stores_ok);
42786 + fscache_stat_unchecked(&fscache_n_store_ops);
42787 + fscache_stat_unchecked(&fscache_n_stores_ok);
42788
42789 /* the work queue now carries its own ref on the object */
42790 fscache_put_operation(&op->op);
42791 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
42792 return 0;
42793
42794 already_queued:
42795 - fscache_stat(&fscache_n_stores_again);
42796 + fscache_stat_unchecked(&fscache_n_stores_again);
42797 already_pending:
42798 spin_unlock(&cookie->stores_lock);
42799 spin_unlock(&object->lock);
42800 spin_unlock(&cookie->lock);
42801 radix_tree_preload_end();
42802 kfree(op);
42803 - fscache_stat(&fscache_n_stores_ok);
42804 + fscache_stat_unchecked(&fscache_n_stores_ok);
42805 _leave(" = 0");
42806 return 0;
42807
42808 @@ -851,14 +851,14 @@ nobufs:
42809 spin_unlock(&cookie->lock);
42810 radix_tree_preload_end();
42811 kfree(op);
42812 - fscache_stat(&fscache_n_stores_nobufs);
42813 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42814 _leave(" = -ENOBUFS");
42815 return -ENOBUFS;
42816
42817 nomem_free:
42818 kfree(op);
42819 nomem:
42820 - fscache_stat(&fscache_n_stores_oom);
42821 + fscache_stat_unchecked(&fscache_n_stores_oom);
42822 _leave(" = -ENOMEM");
42823 return -ENOMEM;
42824 }
42825 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
42826 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42827 ASSERTCMP(page, !=, NULL);
42828
42829 - fscache_stat(&fscache_n_uncaches);
42830 + fscache_stat_unchecked(&fscache_n_uncaches);
42831
42832 /* cache withdrawal may beat us to it */
42833 if (!PageFsCache(page))
42834 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
42835 unsigned long loop;
42836
42837 #ifdef CONFIG_FSCACHE_STATS
42838 - atomic_add(pagevec->nr, &fscache_n_marks);
42839 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42840 #endif
42841
42842 for (loop = 0; loop < pagevec->nr; loop++) {
42843 diff -urNp linux-3.0.7/fs/fscache/stats.c linux-3.0.7/fs/fscache/stats.c
42844 --- linux-3.0.7/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
42845 +++ linux-3.0.7/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
42846 @@ -18,95 +18,95 @@
42847 /*
42848 * operation counters
42849 */
42850 -atomic_t fscache_n_op_pend;
42851 -atomic_t fscache_n_op_run;
42852 -atomic_t fscache_n_op_enqueue;
42853 -atomic_t fscache_n_op_requeue;
42854 -atomic_t fscache_n_op_deferred_release;
42855 -atomic_t fscache_n_op_release;
42856 -atomic_t fscache_n_op_gc;
42857 -atomic_t fscache_n_op_cancelled;
42858 -atomic_t fscache_n_op_rejected;
42859 -
42860 -atomic_t fscache_n_attr_changed;
42861 -atomic_t fscache_n_attr_changed_ok;
42862 -atomic_t fscache_n_attr_changed_nobufs;
42863 -atomic_t fscache_n_attr_changed_nomem;
42864 -atomic_t fscache_n_attr_changed_calls;
42865 -
42866 -atomic_t fscache_n_allocs;
42867 -atomic_t fscache_n_allocs_ok;
42868 -atomic_t fscache_n_allocs_wait;
42869 -atomic_t fscache_n_allocs_nobufs;
42870 -atomic_t fscache_n_allocs_intr;
42871 -atomic_t fscache_n_allocs_object_dead;
42872 -atomic_t fscache_n_alloc_ops;
42873 -atomic_t fscache_n_alloc_op_waits;
42874 -
42875 -atomic_t fscache_n_retrievals;
42876 -atomic_t fscache_n_retrievals_ok;
42877 -atomic_t fscache_n_retrievals_wait;
42878 -atomic_t fscache_n_retrievals_nodata;
42879 -atomic_t fscache_n_retrievals_nobufs;
42880 -atomic_t fscache_n_retrievals_intr;
42881 -atomic_t fscache_n_retrievals_nomem;
42882 -atomic_t fscache_n_retrievals_object_dead;
42883 -atomic_t fscache_n_retrieval_ops;
42884 -atomic_t fscache_n_retrieval_op_waits;
42885 -
42886 -atomic_t fscache_n_stores;
42887 -atomic_t fscache_n_stores_ok;
42888 -atomic_t fscache_n_stores_again;
42889 -atomic_t fscache_n_stores_nobufs;
42890 -atomic_t fscache_n_stores_oom;
42891 -atomic_t fscache_n_store_ops;
42892 -atomic_t fscache_n_store_calls;
42893 -atomic_t fscache_n_store_pages;
42894 -atomic_t fscache_n_store_radix_deletes;
42895 -atomic_t fscache_n_store_pages_over_limit;
42896 -
42897 -atomic_t fscache_n_store_vmscan_not_storing;
42898 -atomic_t fscache_n_store_vmscan_gone;
42899 -atomic_t fscache_n_store_vmscan_busy;
42900 -atomic_t fscache_n_store_vmscan_cancelled;
42901 -
42902 -atomic_t fscache_n_marks;
42903 -atomic_t fscache_n_uncaches;
42904 -
42905 -atomic_t fscache_n_acquires;
42906 -atomic_t fscache_n_acquires_null;
42907 -atomic_t fscache_n_acquires_no_cache;
42908 -atomic_t fscache_n_acquires_ok;
42909 -atomic_t fscache_n_acquires_nobufs;
42910 -atomic_t fscache_n_acquires_oom;
42911 -
42912 -atomic_t fscache_n_updates;
42913 -atomic_t fscache_n_updates_null;
42914 -atomic_t fscache_n_updates_run;
42915 -
42916 -atomic_t fscache_n_relinquishes;
42917 -atomic_t fscache_n_relinquishes_null;
42918 -atomic_t fscache_n_relinquishes_waitcrt;
42919 -atomic_t fscache_n_relinquishes_retire;
42920 -
42921 -atomic_t fscache_n_cookie_index;
42922 -atomic_t fscache_n_cookie_data;
42923 -atomic_t fscache_n_cookie_special;
42924 -
42925 -atomic_t fscache_n_object_alloc;
42926 -atomic_t fscache_n_object_no_alloc;
42927 -atomic_t fscache_n_object_lookups;
42928 -atomic_t fscache_n_object_lookups_negative;
42929 -atomic_t fscache_n_object_lookups_positive;
42930 -atomic_t fscache_n_object_lookups_timed_out;
42931 -atomic_t fscache_n_object_created;
42932 -atomic_t fscache_n_object_avail;
42933 -atomic_t fscache_n_object_dead;
42934 -
42935 -atomic_t fscache_n_checkaux_none;
42936 -atomic_t fscache_n_checkaux_okay;
42937 -atomic_t fscache_n_checkaux_update;
42938 -atomic_t fscache_n_checkaux_obsolete;
42939 +atomic_unchecked_t fscache_n_op_pend;
42940 +atomic_unchecked_t fscache_n_op_run;
42941 +atomic_unchecked_t fscache_n_op_enqueue;
42942 +atomic_unchecked_t fscache_n_op_requeue;
42943 +atomic_unchecked_t fscache_n_op_deferred_release;
42944 +atomic_unchecked_t fscache_n_op_release;
42945 +atomic_unchecked_t fscache_n_op_gc;
42946 +atomic_unchecked_t fscache_n_op_cancelled;
42947 +atomic_unchecked_t fscache_n_op_rejected;
42948 +
42949 +atomic_unchecked_t fscache_n_attr_changed;
42950 +atomic_unchecked_t fscache_n_attr_changed_ok;
42951 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42952 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42953 +atomic_unchecked_t fscache_n_attr_changed_calls;
42954 +
42955 +atomic_unchecked_t fscache_n_allocs;
42956 +atomic_unchecked_t fscache_n_allocs_ok;
42957 +atomic_unchecked_t fscache_n_allocs_wait;
42958 +atomic_unchecked_t fscache_n_allocs_nobufs;
42959 +atomic_unchecked_t fscache_n_allocs_intr;
42960 +atomic_unchecked_t fscache_n_allocs_object_dead;
42961 +atomic_unchecked_t fscache_n_alloc_ops;
42962 +atomic_unchecked_t fscache_n_alloc_op_waits;
42963 +
42964 +atomic_unchecked_t fscache_n_retrievals;
42965 +atomic_unchecked_t fscache_n_retrievals_ok;
42966 +atomic_unchecked_t fscache_n_retrievals_wait;
42967 +atomic_unchecked_t fscache_n_retrievals_nodata;
42968 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42969 +atomic_unchecked_t fscache_n_retrievals_intr;
42970 +atomic_unchecked_t fscache_n_retrievals_nomem;
42971 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42972 +atomic_unchecked_t fscache_n_retrieval_ops;
42973 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42974 +
42975 +atomic_unchecked_t fscache_n_stores;
42976 +atomic_unchecked_t fscache_n_stores_ok;
42977 +atomic_unchecked_t fscache_n_stores_again;
42978 +atomic_unchecked_t fscache_n_stores_nobufs;
42979 +atomic_unchecked_t fscache_n_stores_oom;
42980 +atomic_unchecked_t fscache_n_store_ops;
42981 +atomic_unchecked_t fscache_n_store_calls;
42982 +atomic_unchecked_t fscache_n_store_pages;
42983 +atomic_unchecked_t fscache_n_store_radix_deletes;
42984 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42985 +
42986 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42987 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42988 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42989 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42990 +
42991 +atomic_unchecked_t fscache_n_marks;
42992 +atomic_unchecked_t fscache_n_uncaches;
42993 +
42994 +atomic_unchecked_t fscache_n_acquires;
42995 +atomic_unchecked_t fscache_n_acquires_null;
42996 +atomic_unchecked_t fscache_n_acquires_no_cache;
42997 +atomic_unchecked_t fscache_n_acquires_ok;
42998 +atomic_unchecked_t fscache_n_acquires_nobufs;
42999 +atomic_unchecked_t fscache_n_acquires_oom;
43000 +
43001 +atomic_unchecked_t fscache_n_updates;
43002 +atomic_unchecked_t fscache_n_updates_null;
43003 +atomic_unchecked_t fscache_n_updates_run;
43004 +
43005 +atomic_unchecked_t fscache_n_relinquishes;
43006 +atomic_unchecked_t fscache_n_relinquishes_null;
43007 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43008 +atomic_unchecked_t fscache_n_relinquishes_retire;
43009 +
43010 +atomic_unchecked_t fscache_n_cookie_index;
43011 +atomic_unchecked_t fscache_n_cookie_data;
43012 +atomic_unchecked_t fscache_n_cookie_special;
43013 +
43014 +atomic_unchecked_t fscache_n_object_alloc;
43015 +atomic_unchecked_t fscache_n_object_no_alloc;
43016 +atomic_unchecked_t fscache_n_object_lookups;
43017 +atomic_unchecked_t fscache_n_object_lookups_negative;
43018 +atomic_unchecked_t fscache_n_object_lookups_positive;
43019 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43020 +atomic_unchecked_t fscache_n_object_created;
43021 +atomic_unchecked_t fscache_n_object_avail;
43022 +atomic_unchecked_t fscache_n_object_dead;
43023 +
43024 +atomic_unchecked_t fscache_n_checkaux_none;
43025 +atomic_unchecked_t fscache_n_checkaux_okay;
43026 +atomic_unchecked_t fscache_n_checkaux_update;
43027 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43028
43029 atomic_t fscache_n_cop_alloc_object;
43030 atomic_t fscache_n_cop_lookup_object;
43031 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43032 seq_puts(m, "FS-Cache statistics\n");
43033
43034 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43035 - atomic_read(&fscache_n_cookie_index),
43036 - atomic_read(&fscache_n_cookie_data),
43037 - atomic_read(&fscache_n_cookie_special));
43038 + atomic_read_unchecked(&fscache_n_cookie_index),
43039 + atomic_read_unchecked(&fscache_n_cookie_data),
43040 + atomic_read_unchecked(&fscache_n_cookie_special));
43041
43042 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43043 - atomic_read(&fscache_n_object_alloc),
43044 - atomic_read(&fscache_n_object_no_alloc),
43045 - atomic_read(&fscache_n_object_avail),
43046 - atomic_read(&fscache_n_object_dead));
43047 + atomic_read_unchecked(&fscache_n_object_alloc),
43048 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43049 + atomic_read_unchecked(&fscache_n_object_avail),
43050 + atomic_read_unchecked(&fscache_n_object_dead));
43051 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43052 - atomic_read(&fscache_n_checkaux_none),
43053 - atomic_read(&fscache_n_checkaux_okay),
43054 - atomic_read(&fscache_n_checkaux_update),
43055 - atomic_read(&fscache_n_checkaux_obsolete));
43056 + atomic_read_unchecked(&fscache_n_checkaux_none),
43057 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43058 + atomic_read_unchecked(&fscache_n_checkaux_update),
43059 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43060
43061 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43062 - atomic_read(&fscache_n_marks),
43063 - atomic_read(&fscache_n_uncaches));
43064 + atomic_read_unchecked(&fscache_n_marks),
43065 + atomic_read_unchecked(&fscache_n_uncaches));
43066
43067 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43068 " oom=%u\n",
43069 - atomic_read(&fscache_n_acquires),
43070 - atomic_read(&fscache_n_acquires_null),
43071 - atomic_read(&fscache_n_acquires_no_cache),
43072 - atomic_read(&fscache_n_acquires_ok),
43073 - atomic_read(&fscache_n_acquires_nobufs),
43074 - atomic_read(&fscache_n_acquires_oom));
43075 + atomic_read_unchecked(&fscache_n_acquires),
43076 + atomic_read_unchecked(&fscache_n_acquires_null),
43077 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
43078 + atomic_read_unchecked(&fscache_n_acquires_ok),
43079 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
43080 + atomic_read_unchecked(&fscache_n_acquires_oom));
43081
43082 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43083 - atomic_read(&fscache_n_object_lookups),
43084 - atomic_read(&fscache_n_object_lookups_negative),
43085 - atomic_read(&fscache_n_object_lookups_positive),
43086 - atomic_read(&fscache_n_object_created),
43087 - atomic_read(&fscache_n_object_lookups_timed_out));
43088 + atomic_read_unchecked(&fscache_n_object_lookups),
43089 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
43090 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
43091 + atomic_read_unchecked(&fscache_n_object_created),
43092 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43093
43094 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43095 - atomic_read(&fscache_n_updates),
43096 - atomic_read(&fscache_n_updates_null),
43097 - atomic_read(&fscache_n_updates_run));
43098 + atomic_read_unchecked(&fscache_n_updates),
43099 + atomic_read_unchecked(&fscache_n_updates_null),
43100 + atomic_read_unchecked(&fscache_n_updates_run));
43101
43102 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43103 - atomic_read(&fscache_n_relinquishes),
43104 - atomic_read(&fscache_n_relinquishes_null),
43105 - atomic_read(&fscache_n_relinquishes_waitcrt),
43106 - atomic_read(&fscache_n_relinquishes_retire));
43107 + atomic_read_unchecked(&fscache_n_relinquishes),
43108 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43109 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43110 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43111
43112 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43113 - atomic_read(&fscache_n_attr_changed),
43114 - atomic_read(&fscache_n_attr_changed_ok),
43115 - atomic_read(&fscache_n_attr_changed_nobufs),
43116 - atomic_read(&fscache_n_attr_changed_nomem),
43117 - atomic_read(&fscache_n_attr_changed_calls));
43118 + atomic_read_unchecked(&fscache_n_attr_changed),
43119 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43120 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43121 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43122 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43123
43124 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43125 - atomic_read(&fscache_n_allocs),
43126 - atomic_read(&fscache_n_allocs_ok),
43127 - atomic_read(&fscache_n_allocs_wait),
43128 - atomic_read(&fscache_n_allocs_nobufs),
43129 - atomic_read(&fscache_n_allocs_intr));
43130 + atomic_read_unchecked(&fscache_n_allocs),
43131 + atomic_read_unchecked(&fscache_n_allocs_ok),
43132 + atomic_read_unchecked(&fscache_n_allocs_wait),
43133 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43134 + atomic_read_unchecked(&fscache_n_allocs_intr));
43135 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43136 - atomic_read(&fscache_n_alloc_ops),
43137 - atomic_read(&fscache_n_alloc_op_waits),
43138 - atomic_read(&fscache_n_allocs_object_dead));
43139 + atomic_read_unchecked(&fscache_n_alloc_ops),
43140 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43141 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43142
43143 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43144 " int=%u oom=%u\n",
43145 - atomic_read(&fscache_n_retrievals),
43146 - atomic_read(&fscache_n_retrievals_ok),
43147 - atomic_read(&fscache_n_retrievals_wait),
43148 - atomic_read(&fscache_n_retrievals_nodata),
43149 - atomic_read(&fscache_n_retrievals_nobufs),
43150 - atomic_read(&fscache_n_retrievals_intr),
43151 - atomic_read(&fscache_n_retrievals_nomem));
43152 + atomic_read_unchecked(&fscache_n_retrievals),
43153 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43154 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43155 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43156 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43157 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43158 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43159 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43160 - atomic_read(&fscache_n_retrieval_ops),
43161 - atomic_read(&fscache_n_retrieval_op_waits),
43162 - atomic_read(&fscache_n_retrievals_object_dead));
43163 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43164 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43165 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43166
43167 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43168 - atomic_read(&fscache_n_stores),
43169 - atomic_read(&fscache_n_stores_ok),
43170 - atomic_read(&fscache_n_stores_again),
43171 - atomic_read(&fscache_n_stores_nobufs),
43172 - atomic_read(&fscache_n_stores_oom));
43173 + atomic_read_unchecked(&fscache_n_stores),
43174 + atomic_read_unchecked(&fscache_n_stores_ok),
43175 + atomic_read_unchecked(&fscache_n_stores_again),
43176 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43177 + atomic_read_unchecked(&fscache_n_stores_oom));
43178 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43179 - atomic_read(&fscache_n_store_ops),
43180 - atomic_read(&fscache_n_store_calls),
43181 - atomic_read(&fscache_n_store_pages),
43182 - atomic_read(&fscache_n_store_radix_deletes),
43183 - atomic_read(&fscache_n_store_pages_over_limit));
43184 + atomic_read_unchecked(&fscache_n_store_ops),
43185 + atomic_read_unchecked(&fscache_n_store_calls),
43186 + atomic_read_unchecked(&fscache_n_store_pages),
43187 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43188 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43189
43190 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43191 - atomic_read(&fscache_n_store_vmscan_not_storing),
43192 - atomic_read(&fscache_n_store_vmscan_gone),
43193 - atomic_read(&fscache_n_store_vmscan_busy),
43194 - atomic_read(&fscache_n_store_vmscan_cancelled));
43195 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43196 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43197 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43198 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43199
43200 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43201 - atomic_read(&fscache_n_op_pend),
43202 - atomic_read(&fscache_n_op_run),
43203 - atomic_read(&fscache_n_op_enqueue),
43204 - atomic_read(&fscache_n_op_cancelled),
43205 - atomic_read(&fscache_n_op_rejected));
43206 + atomic_read_unchecked(&fscache_n_op_pend),
43207 + atomic_read_unchecked(&fscache_n_op_run),
43208 + atomic_read_unchecked(&fscache_n_op_enqueue),
43209 + atomic_read_unchecked(&fscache_n_op_cancelled),
43210 + atomic_read_unchecked(&fscache_n_op_rejected));
43211 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43212 - atomic_read(&fscache_n_op_deferred_release),
43213 - atomic_read(&fscache_n_op_release),
43214 - atomic_read(&fscache_n_op_gc));
43215 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43216 + atomic_read_unchecked(&fscache_n_op_release),
43217 + atomic_read_unchecked(&fscache_n_op_gc));
43218
43219 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43220 atomic_read(&fscache_n_cop_alloc_object),
43221 diff -urNp linux-3.0.7/fs/fs_struct.c linux-3.0.7/fs/fs_struct.c
43222 --- linux-3.0.7/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
43223 +++ linux-3.0.7/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
43224 @@ -4,6 +4,7 @@
43225 #include <linux/path.h>
43226 #include <linux/slab.h>
43227 #include <linux/fs_struct.h>
43228 +#include <linux/grsecurity.h>
43229 #include "internal.h"
43230
43231 static inline void path_get_longterm(struct path *path)
43232 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43233 old_root = fs->root;
43234 fs->root = *path;
43235 path_get_longterm(path);
43236 + gr_set_chroot_entries(current, path);
43237 write_seqcount_end(&fs->seq);
43238 spin_unlock(&fs->lock);
43239 if (old_root.dentry)
43240 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43241 && fs->root.mnt == old_root->mnt) {
43242 path_get_longterm(new_root);
43243 fs->root = *new_root;
43244 + gr_set_chroot_entries(p, new_root);
43245 count++;
43246 }
43247 if (fs->pwd.dentry == old_root->dentry
43248 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43249 spin_lock(&fs->lock);
43250 write_seqcount_begin(&fs->seq);
43251 tsk->fs = NULL;
43252 - kill = !--fs->users;
43253 + gr_clear_chroot_entries(tsk);
43254 + kill = !atomic_dec_return(&fs->users);
43255 write_seqcount_end(&fs->seq);
43256 spin_unlock(&fs->lock);
43257 task_unlock(tsk);
43258 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43259 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43260 /* We don't need to lock fs - think why ;-) */
43261 if (fs) {
43262 - fs->users = 1;
43263 + atomic_set(&fs->users, 1);
43264 fs->in_exec = 0;
43265 spin_lock_init(&fs->lock);
43266 seqcount_init(&fs->seq);
43267 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43268 spin_lock(&old->lock);
43269 fs->root = old->root;
43270 path_get_longterm(&fs->root);
43271 + /* instead of calling gr_set_chroot_entries here,
43272 + we call it from every caller of this function
43273 + */
43274 fs->pwd = old->pwd;
43275 path_get_longterm(&fs->pwd);
43276 spin_unlock(&old->lock);
43277 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43278
43279 task_lock(current);
43280 spin_lock(&fs->lock);
43281 - kill = !--fs->users;
43282 + kill = !atomic_dec_return(&fs->users);
43283 current->fs = new_fs;
43284 + gr_set_chroot_entries(current, &new_fs->root);
43285 spin_unlock(&fs->lock);
43286 task_unlock(current);
43287
43288 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43289
43290 /* to be mentioned only in INIT_TASK */
43291 struct fs_struct init_fs = {
43292 - .users = 1,
43293 + .users = ATOMIC_INIT(1),
43294 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43295 .seq = SEQCNT_ZERO,
43296 .umask = 0022,
43297 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43298 task_lock(current);
43299
43300 spin_lock(&init_fs.lock);
43301 - init_fs.users++;
43302 + atomic_inc(&init_fs.users);
43303 spin_unlock(&init_fs.lock);
43304
43305 spin_lock(&fs->lock);
43306 current->fs = &init_fs;
43307 - kill = !--fs->users;
43308 + gr_set_chroot_entries(current, &current->fs->root);
43309 + kill = !atomic_dec_return(&fs->users);
43310 spin_unlock(&fs->lock);
43311
43312 task_unlock(current);
43313 diff -urNp linux-3.0.7/fs/fuse/cuse.c linux-3.0.7/fs/fuse/cuse.c
43314 --- linux-3.0.7/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
43315 +++ linux-3.0.7/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
43316 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
43317 INIT_LIST_HEAD(&cuse_conntbl[i]);
43318
43319 /* inherit and extend fuse_dev_operations */
43320 - cuse_channel_fops = fuse_dev_operations;
43321 - cuse_channel_fops.owner = THIS_MODULE;
43322 - cuse_channel_fops.open = cuse_channel_open;
43323 - cuse_channel_fops.release = cuse_channel_release;
43324 + pax_open_kernel();
43325 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43326 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43327 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43328 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43329 + pax_close_kernel();
43330
43331 cuse_class = class_create(THIS_MODULE, "cuse");
43332 if (IS_ERR(cuse_class))
43333 diff -urNp linux-3.0.7/fs/fuse/dev.c linux-3.0.7/fs/fuse/dev.c
43334 --- linux-3.0.7/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
43335 +++ linux-3.0.7/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
43336 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
43337 ret = 0;
43338 pipe_lock(pipe);
43339
43340 - if (!pipe->readers) {
43341 + if (!atomic_read(&pipe->readers)) {
43342 send_sig(SIGPIPE, current, 0);
43343 if (!ret)
43344 ret = -EPIPE;
43345 diff -urNp linux-3.0.7/fs/fuse/dir.c linux-3.0.7/fs/fuse/dir.c
43346 --- linux-3.0.7/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
43347 +++ linux-3.0.7/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
43348 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
43349 return link;
43350 }
43351
43352 -static void free_link(char *link)
43353 +static void free_link(const char *link)
43354 {
43355 if (!IS_ERR(link))
43356 free_page((unsigned long) link);
43357 diff -urNp linux-3.0.7/fs/gfs2/inode.c linux-3.0.7/fs/gfs2/inode.c
43358 --- linux-3.0.7/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
43359 +++ linux-3.0.7/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
43360 @@ -1525,7 +1525,7 @@ out:
43361
43362 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43363 {
43364 - char *s = nd_get_link(nd);
43365 + const char *s = nd_get_link(nd);
43366 if (!IS_ERR(s))
43367 kfree(s);
43368 }
43369 diff -urNp linux-3.0.7/fs/hfsplus/catalog.c linux-3.0.7/fs/hfsplus/catalog.c
43370 --- linux-3.0.7/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
43371 +++ linux-3.0.7/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
43372 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43373 int err;
43374 u16 type;
43375
43376 + pax_track_stack();
43377 +
43378 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43379 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43380 if (err)
43381 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43382 int entry_size;
43383 int err;
43384
43385 + pax_track_stack();
43386 +
43387 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43388 str->name, cnid, inode->i_nlink);
43389 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43390 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
43391 int entry_size, type;
43392 int err = 0;
43393
43394 + pax_track_stack();
43395 +
43396 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43397 cnid, src_dir->i_ino, src_name->name,
43398 dst_dir->i_ino, dst_name->name);
43399 diff -urNp linux-3.0.7/fs/hfsplus/dir.c linux-3.0.7/fs/hfsplus/dir.c
43400 --- linux-3.0.7/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
43401 +++ linux-3.0.7/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
43402 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
43403 struct hfsplus_readdir_data *rd;
43404 u16 type;
43405
43406 + pax_track_stack();
43407 +
43408 if (filp->f_pos >= inode->i_size)
43409 return 0;
43410
43411 diff -urNp linux-3.0.7/fs/hfsplus/inode.c linux-3.0.7/fs/hfsplus/inode.c
43412 --- linux-3.0.7/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
43413 +++ linux-3.0.7/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
43414 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
43415 int res = 0;
43416 u16 type;
43417
43418 + pax_track_stack();
43419 +
43420 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43421
43422 HFSPLUS_I(inode)->linkid = 0;
43423 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
43424 struct hfs_find_data fd;
43425 hfsplus_cat_entry entry;
43426
43427 + pax_track_stack();
43428 +
43429 if (HFSPLUS_IS_RSRC(inode))
43430 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43431
43432 diff -urNp linux-3.0.7/fs/hfsplus/ioctl.c linux-3.0.7/fs/hfsplus/ioctl.c
43433 --- linux-3.0.7/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
43434 +++ linux-3.0.7/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
43435 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43436 struct hfsplus_cat_file *file;
43437 int res;
43438
43439 + pax_track_stack();
43440 +
43441 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43442 return -EOPNOTSUPP;
43443
43444 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43445 struct hfsplus_cat_file *file;
43446 ssize_t res = 0;
43447
43448 + pax_track_stack();
43449 +
43450 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43451 return -EOPNOTSUPP;
43452
43453 diff -urNp linux-3.0.7/fs/hfsplus/super.c linux-3.0.7/fs/hfsplus/super.c
43454 --- linux-3.0.7/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
43455 +++ linux-3.0.7/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
43456 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
43457 struct nls_table *nls = NULL;
43458 int err;
43459
43460 + pax_track_stack();
43461 +
43462 err = -EINVAL;
43463 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43464 if (!sbi)
43465 diff -urNp linux-3.0.7/fs/hugetlbfs/inode.c linux-3.0.7/fs/hugetlbfs/inode.c
43466 --- linux-3.0.7/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43467 +++ linux-3.0.7/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
43468 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
43469 .kill_sb = kill_litter_super,
43470 };
43471
43472 -static struct vfsmount *hugetlbfs_vfsmount;
43473 +struct vfsmount *hugetlbfs_vfsmount;
43474
43475 static int can_do_hugetlb_shm(void)
43476 {
43477 diff -urNp linux-3.0.7/fs/inode.c linux-3.0.7/fs/inode.c
43478 --- linux-3.0.7/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
43479 +++ linux-3.0.7/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
43480 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
43481
43482 #ifdef CONFIG_SMP
43483 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43484 - static atomic_t shared_last_ino;
43485 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43486 + static atomic_unchecked_t shared_last_ino;
43487 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43488
43489 res = next - LAST_INO_BATCH;
43490 }
43491 diff -urNp linux-3.0.7/fs/jbd/checkpoint.c linux-3.0.7/fs/jbd/checkpoint.c
43492 --- linux-3.0.7/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
43493 +++ linux-3.0.7/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
43494 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
43495 tid_t this_tid;
43496 int result;
43497
43498 + pax_track_stack();
43499 +
43500 jbd_debug(1, "Start checkpoint\n");
43501
43502 /*
43503 diff -urNp linux-3.0.7/fs/jffs2/compr_rtime.c linux-3.0.7/fs/jffs2/compr_rtime.c
43504 --- linux-3.0.7/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
43505 +++ linux-3.0.7/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
43506 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43507 int outpos = 0;
43508 int pos=0;
43509
43510 + pax_track_stack();
43511 +
43512 memset(positions,0,sizeof(positions));
43513
43514 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43515 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43516 int outpos = 0;
43517 int pos=0;
43518
43519 + pax_track_stack();
43520 +
43521 memset(positions,0,sizeof(positions));
43522
43523 while (outpos<destlen) {
43524 diff -urNp linux-3.0.7/fs/jffs2/compr_rubin.c linux-3.0.7/fs/jffs2/compr_rubin.c
43525 --- linux-3.0.7/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
43526 +++ linux-3.0.7/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
43527 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43528 int ret;
43529 uint32_t mysrclen, mydstlen;
43530
43531 + pax_track_stack();
43532 +
43533 mysrclen = *sourcelen;
43534 mydstlen = *dstlen - 8;
43535
43536 diff -urNp linux-3.0.7/fs/jffs2/erase.c linux-3.0.7/fs/jffs2/erase.c
43537 --- linux-3.0.7/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
43538 +++ linux-3.0.7/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
43539 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
43540 struct jffs2_unknown_node marker = {
43541 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43542 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43543 - .totlen = cpu_to_je32(c->cleanmarker_size)
43544 + .totlen = cpu_to_je32(c->cleanmarker_size),
43545 + .hdr_crc = cpu_to_je32(0)
43546 };
43547
43548 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43549 diff -urNp linux-3.0.7/fs/jffs2/wbuf.c linux-3.0.7/fs/jffs2/wbuf.c
43550 --- linux-3.0.7/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
43551 +++ linux-3.0.7/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
43552 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43553 {
43554 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43555 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43556 - .totlen = constant_cpu_to_je32(8)
43557 + .totlen = constant_cpu_to_je32(8),
43558 + .hdr_crc = constant_cpu_to_je32(0)
43559 };
43560
43561 /*
43562 diff -urNp linux-3.0.7/fs/jffs2/xattr.c linux-3.0.7/fs/jffs2/xattr.c
43563 --- linux-3.0.7/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
43564 +++ linux-3.0.7/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
43565 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43566
43567 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43568
43569 + pax_track_stack();
43570 +
43571 /* Phase.1 : Merge same xref */
43572 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43573 xref_tmphash[i] = NULL;
43574 diff -urNp linux-3.0.7/fs/jfs/super.c linux-3.0.7/fs/jfs/super.c
43575 --- linux-3.0.7/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
43576 +++ linux-3.0.7/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
43577 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
43578
43579 jfs_inode_cachep =
43580 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43581 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43582 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43583 init_once);
43584 if (jfs_inode_cachep == NULL)
43585 return -ENOMEM;
43586 diff -urNp linux-3.0.7/fs/Kconfig.binfmt linux-3.0.7/fs/Kconfig.binfmt
43587 --- linux-3.0.7/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
43588 +++ linux-3.0.7/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
43589 @@ -86,7 +86,7 @@ config HAVE_AOUT
43590
43591 config BINFMT_AOUT
43592 tristate "Kernel support for a.out and ECOFF binaries"
43593 - depends on HAVE_AOUT
43594 + depends on HAVE_AOUT && BROKEN
43595 ---help---
43596 A.out (Assembler.OUTput) is a set of formats for libraries and
43597 executables used in the earliest versions of UNIX. Linux used
43598 diff -urNp linux-3.0.7/fs/libfs.c linux-3.0.7/fs/libfs.c
43599 --- linux-3.0.7/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
43600 +++ linux-3.0.7/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
43601 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
43602
43603 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43604 struct dentry *next;
43605 + char d_name[sizeof(next->d_iname)];
43606 + const unsigned char *name;
43607 +
43608 next = list_entry(p, struct dentry, d_u.d_child);
43609 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43610 if (!simple_positive(next)) {
43611 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
43612
43613 spin_unlock(&next->d_lock);
43614 spin_unlock(&dentry->d_lock);
43615 - if (filldir(dirent, next->d_name.name,
43616 + name = next->d_name.name;
43617 + if (name == next->d_iname) {
43618 + memcpy(d_name, name, next->d_name.len);
43619 + name = d_name;
43620 + }
43621 + if (filldir(dirent, name,
43622 next->d_name.len, filp->f_pos,
43623 next->d_inode->i_ino,
43624 dt_type(next->d_inode)) < 0)
43625 diff -urNp linux-3.0.7/fs/lockd/clntproc.c linux-3.0.7/fs/lockd/clntproc.c
43626 --- linux-3.0.7/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
43627 +++ linux-3.0.7/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
43628 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43629 /*
43630 * Cookie counter for NLM requests
43631 */
43632 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43633 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43634
43635 void nlmclnt_next_cookie(struct nlm_cookie *c)
43636 {
43637 - u32 cookie = atomic_inc_return(&nlm_cookie);
43638 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43639
43640 memcpy(c->data, &cookie, 4);
43641 c->len=4;
43642 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43643 struct nlm_rqst reqst, *req;
43644 int status;
43645
43646 + pax_track_stack();
43647 +
43648 req = &reqst;
43649 memset(req, 0, sizeof(*req));
43650 locks_init_lock(&req->a_args.lock.fl);
43651 diff -urNp linux-3.0.7/fs/locks.c linux-3.0.7/fs/locks.c
43652 --- linux-3.0.7/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
43653 +++ linux-3.0.7/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
43654 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
43655 return;
43656
43657 if (filp->f_op && filp->f_op->flock) {
43658 - struct file_lock fl = {
43659 + struct file_lock flock = {
43660 .fl_pid = current->tgid,
43661 .fl_file = filp,
43662 .fl_flags = FL_FLOCK,
43663 .fl_type = F_UNLCK,
43664 .fl_end = OFFSET_MAX,
43665 };
43666 - filp->f_op->flock(filp, F_SETLKW, &fl);
43667 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43668 - fl.fl_ops->fl_release_private(&fl);
43669 + filp->f_op->flock(filp, F_SETLKW, &flock);
43670 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43671 + flock.fl_ops->fl_release_private(&flock);
43672 }
43673
43674 lock_flocks();
43675 diff -urNp linux-3.0.7/fs/logfs/super.c linux-3.0.7/fs/logfs/super.c
43676 --- linux-3.0.7/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
43677 +++ linux-3.0.7/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
43678 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
43679 struct logfs_disk_super _ds1, *ds1 = &_ds1;
43680 int err, valid0, valid1;
43681
43682 + pax_track_stack();
43683 +
43684 /* read first superblock */
43685 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
43686 if (err)
43687 diff -urNp linux-3.0.7/fs/namei.c linux-3.0.7/fs/namei.c
43688 --- linux-3.0.7/fs/namei.c 2011-10-16 21:54:54.000000000 -0400
43689 +++ linux-3.0.7/fs/namei.c 2011-10-18 06:55:15.000000000 -0400
43690 @@ -237,21 +237,23 @@ int generic_permission(struct inode *ino
43691 return ret;
43692
43693 /*
43694 - * Read/write DACs are always overridable.
43695 - * Executable DACs are overridable for all directories and
43696 - * for non-directories that have least one exec bit set.
43697 + * Searching includes executable on directories, else just read.
43698 */
43699 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43700 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43701 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43702 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
43703 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43704 return 0;
43705 + }
43706
43707 /*
43708 - * Searching includes executable on directories, else just read.
43709 + * Read/write DACs are always overridable.
43710 + * Executable DACs are overridable for all directories and
43711 + * for non-directories that have least one exec bit set.
43712 */
43713 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43714 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
43715 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43716 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
43717 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43718 return 0;
43719 + }
43720
43721 return -EACCES;
43722 }
43723 @@ -547,6 +549,9 @@ static int complete_walk(struct nameidat
43724 br_read_unlock(vfsmount_lock);
43725 }
43726
43727 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43728 + return -ENOENT;
43729 +
43730 if (likely(!(nd->flags & LOOKUP_JUMPED)))
43731 return 0;
43732
43733 @@ -593,9 +598,12 @@ static inline int exec_permission(struct
43734 if (ret == -ECHILD)
43735 return ret;
43736
43737 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
43738 - ns_capable(ns, CAP_DAC_READ_SEARCH))
43739 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
43740 goto ok;
43741 + else {
43742 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
43743 + goto ok;
43744 + }
43745
43746 return ret;
43747 ok:
43748 @@ -703,11 +711,26 @@ follow_link(struct path *link, struct na
43749 return error;
43750 }
43751
43752 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
43753 + dentry->d_inode, dentry, nd->path.mnt)) {
43754 + error = -EACCES;
43755 + *p = ERR_PTR(error); /* no ->put_link(), please */
43756 + path_put(&nd->path);
43757 + return error;
43758 + }
43759 +
43760 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
43761 + error = -ENOENT;
43762 + *p = ERR_PTR(error); /* no ->put_link(), please */
43763 + path_put(&nd->path);
43764 + return error;
43765 + }
43766 +
43767 nd->last_type = LAST_BIND;
43768 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
43769 error = PTR_ERR(*p);
43770 if (!IS_ERR(*p)) {
43771 - char *s = nd_get_link(nd);
43772 + const char *s = nd_get_link(nd);
43773 error = 0;
43774 if (s)
43775 error = __vfs_follow_link(nd, s);
43776 @@ -1625,6 +1648,9 @@ static int do_path_lookup(int dfd, const
43777 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
43778
43779 if (likely(!retval)) {
43780 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43781 + return -ENOENT;
43782 +
43783 if (unlikely(!audit_dummy_context())) {
43784 if (nd->path.dentry && nd->inode)
43785 audit_inode(name, nd->path.dentry);
43786 @@ -1935,6 +1961,30 @@ int vfs_create(struct inode *dir, struct
43787 return error;
43788 }
43789
43790 +/*
43791 + * Note that while the flag value (low two bits) for sys_open means:
43792 + * 00 - read-only
43793 + * 01 - write-only
43794 + * 10 - read-write
43795 + * 11 - special
43796 + * it is changed into
43797 + * 00 - no permissions needed
43798 + * 01 - read-permission
43799 + * 10 - write-permission
43800 + * 11 - read-write
43801 + * for the internal routines (ie open_namei()/follow_link() etc)
43802 + * This is more logical, and also allows the 00 "no perm needed"
43803 + * to be used for symlinks (where the permissions are checked
43804 + * later).
43805 + *
43806 +*/
43807 +static inline int open_to_namei_flags(int flag)
43808 +{
43809 + if ((flag+1) & O_ACCMODE)
43810 + flag++;
43811 + return flag;
43812 +}
43813 +
43814 static int may_open(struct path *path, int acc_mode, int flag)
43815 {
43816 struct dentry *dentry = path->dentry;
43817 @@ -1987,7 +2037,27 @@ static int may_open(struct path *path, i
43818 /*
43819 * Ensure there are no outstanding leases on the file.
43820 */
43821 - return break_lease(inode, flag);
43822 + error = break_lease(inode, flag);
43823 +
43824 + if (error)
43825 + return error;
43826 +
43827 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43828 + error = -EPERM;
43829 + goto exit;
43830 + }
43831 +
43832 + if (gr_handle_rawio(inode)) {
43833 + error = -EPERM;
43834 + goto exit;
43835 + }
43836 +
43837 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
43838 + error = -EACCES;
43839 + goto exit;
43840 + }
43841 +exit:
43842 + return error;
43843 }
43844
43845 static int handle_truncate(struct file *filp)
43846 @@ -2013,30 +2083,6 @@ static int handle_truncate(struct file *
43847 }
43848
43849 /*
43850 - * Note that while the flag value (low two bits) for sys_open means:
43851 - * 00 - read-only
43852 - * 01 - write-only
43853 - * 10 - read-write
43854 - * 11 - special
43855 - * it is changed into
43856 - * 00 - no permissions needed
43857 - * 01 - read-permission
43858 - * 10 - write-permission
43859 - * 11 - read-write
43860 - * for the internal routines (ie open_namei()/follow_link() etc)
43861 - * This is more logical, and also allows the 00 "no perm needed"
43862 - * to be used for symlinks (where the permissions are checked
43863 - * later).
43864 - *
43865 -*/
43866 -static inline int open_to_namei_flags(int flag)
43867 -{
43868 - if ((flag+1) & O_ACCMODE)
43869 - flag++;
43870 - return flag;
43871 -}
43872 -
43873 -/*
43874 * Handle the last step of open()
43875 */
43876 static struct file *do_last(struct nameidata *nd, struct path *path,
43877 @@ -2045,6 +2091,7 @@ static struct file *do_last(struct namei
43878 struct dentry *dir = nd->path.dentry;
43879 struct dentry *dentry;
43880 int open_flag = op->open_flag;
43881 + int flag = open_to_namei_flags(open_flag);
43882 int will_truncate = open_flag & O_TRUNC;
43883 int want_write = 0;
43884 int acc_mode = op->acc_mode;
43885 @@ -2095,7 +2142,7 @@ static struct file *do_last(struct namei
43886 /* sayonara */
43887 error = complete_walk(nd);
43888 if (error)
43889 - return ERR_PTR(-ECHILD);
43890 + return ERR_PTR(error);
43891
43892 error = -ENOTDIR;
43893 if (nd->flags & LOOKUP_DIRECTORY) {
43894 @@ -2132,6 +2179,12 @@ static struct file *do_last(struct namei
43895 /* Negative dentry, just create the file */
43896 if (!dentry->d_inode) {
43897 int mode = op->mode;
43898 +
43899 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
43900 + error = -EACCES;
43901 + goto exit_mutex_unlock;
43902 + }
43903 +
43904 if (!IS_POSIXACL(dir->d_inode))
43905 mode &= ~current_umask();
43906 /*
43907 @@ -2155,6 +2208,8 @@ static struct file *do_last(struct namei
43908 error = vfs_create(dir->d_inode, dentry, mode, nd);
43909 if (error)
43910 goto exit_mutex_unlock;
43911 + else
43912 + gr_handle_create(path->dentry, path->mnt);
43913 mutex_unlock(&dir->d_inode->i_mutex);
43914 dput(nd->path.dentry);
43915 nd->path.dentry = dentry;
43916 @@ -2164,6 +2219,14 @@ static struct file *do_last(struct namei
43917 /*
43918 * It already exists.
43919 */
43920 +
43921 + /* only check if O_CREAT is specified, all other checks need to go
43922 + into may_open */
43923 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
43924 + error = -EACCES;
43925 + goto exit_mutex_unlock;
43926 + }
43927 +
43928 mutex_unlock(&dir->d_inode->i_mutex);
43929 audit_inode(pathname, path->dentry);
43930
43931 @@ -2450,6 +2513,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43932 error = may_mknod(mode);
43933 if (error)
43934 goto out_dput;
43935 +
43936 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43937 + error = -EPERM;
43938 + goto out_dput;
43939 + }
43940 +
43941 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43942 + error = -EACCES;
43943 + goto out_dput;
43944 + }
43945 +
43946 error = mnt_want_write(nd.path.mnt);
43947 if (error)
43948 goto out_dput;
43949 @@ -2470,6 +2544,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43950 }
43951 out_drop_write:
43952 mnt_drop_write(nd.path.mnt);
43953 +
43954 + if (!error)
43955 + gr_handle_create(dentry, nd.path.mnt);
43956 out_dput:
43957 dput(dentry);
43958 out_unlock:
43959 @@ -2522,6 +2599,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43960 if (IS_ERR(dentry))
43961 goto out_unlock;
43962
43963 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43964 + error = -EACCES;
43965 + goto out_dput;
43966 + }
43967 +
43968 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43969 mode &= ~current_umask();
43970 error = mnt_want_write(nd.path.mnt);
43971 @@ -2533,6 +2615,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43972 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43973 out_drop_write:
43974 mnt_drop_write(nd.path.mnt);
43975 +
43976 + if (!error)
43977 + gr_handle_create(dentry, nd.path.mnt);
43978 +
43979 out_dput:
43980 dput(dentry);
43981 out_unlock:
43982 @@ -2615,6 +2701,8 @@ static long do_rmdir(int dfd, const char
43983 char * name;
43984 struct dentry *dentry;
43985 struct nameidata nd;
43986 + ino_t saved_ino = 0;
43987 + dev_t saved_dev = 0;
43988
43989 error = user_path_parent(dfd, pathname, &nd, &name);
43990 if (error)
43991 @@ -2643,6 +2731,17 @@ static long do_rmdir(int dfd, const char
43992 error = -ENOENT;
43993 goto exit3;
43994 }
43995 +
43996 + if (dentry->d_inode->i_nlink <= 1) {
43997 + saved_ino = dentry->d_inode->i_ino;
43998 + saved_dev = gr_get_dev_from_dentry(dentry);
43999 + }
44000 +
44001 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44002 + error = -EACCES;
44003 + goto exit3;
44004 + }
44005 +
44006 error = mnt_want_write(nd.path.mnt);
44007 if (error)
44008 goto exit3;
44009 @@ -2650,6 +2749,8 @@ static long do_rmdir(int dfd, const char
44010 if (error)
44011 goto exit4;
44012 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44013 + if (!error && (saved_dev || saved_ino))
44014 + gr_handle_delete(saved_ino, saved_dev);
44015 exit4:
44016 mnt_drop_write(nd.path.mnt);
44017 exit3:
44018 @@ -2712,6 +2813,8 @@ static long do_unlinkat(int dfd, const c
44019 struct dentry *dentry;
44020 struct nameidata nd;
44021 struct inode *inode = NULL;
44022 + ino_t saved_ino = 0;
44023 + dev_t saved_dev = 0;
44024
44025 error = user_path_parent(dfd, pathname, &nd, &name);
44026 if (error)
44027 @@ -2734,6 +2837,16 @@ static long do_unlinkat(int dfd, const c
44028 if (!inode)
44029 goto slashes;
44030 ihold(inode);
44031 +
44032 + if (inode->i_nlink <= 1) {
44033 + saved_ino = inode->i_ino;
44034 + saved_dev = gr_get_dev_from_dentry(dentry);
44035 + }
44036 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44037 + error = -EACCES;
44038 + goto exit2;
44039 + }
44040 +
44041 error = mnt_want_write(nd.path.mnt);
44042 if (error)
44043 goto exit2;
44044 @@ -2741,6 +2854,8 @@ static long do_unlinkat(int dfd, const c
44045 if (error)
44046 goto exit3;
44047 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44048 + if (!error && (saved_ino || saved_dev))
44049 + gr_handle_delete(saved_ino, saved_dev);
44050 exit3:
44051 mnt_drop_write(nd.path.mnt);
44052 exit2:
44053 @@ -2818,6 +2933,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44054 if (IS_ERR(dentry))
44055 goto out_unlock;
44056
44057 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44058 + error = -EACCES;
44059 + goto out_dput;
44060 + }
44061 +
44062 error = mnt_want_write(nd.path.mnt);
44063 if (error)
44064 goto out_dput;
44065 @@ -2825,6 +2945,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44066 if (error)
44067 goto out_drop_write;
44068 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44069 + if (!error)
44070 + gr_handle_create(dentry, nd.path.mnt);
44071 out_drop_write:
44072 mnt_drop_write(nd.path.mnt);
44073 out_dput:
44074 @@ -2933,6 +3055,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44075 error = PTR_ERR(new_dentry);
44076 if (IS_ERR(new_dentry))
44077 goto out_unlock;
44078 +
44079 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44080 + old_path.dentry->d_inode,
44081 + old_path.dentry->d_inode->i_mode, to)) {
44082 + error = -EACCES;
44083 + goto out_dput;
44084 + }
44085 +
44086 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44087 + old_path.dentry, old_path.mnt, to)) {
44088 + error = -EACCES;
44089 + goto out_dput;
44090 + }
44091 +
44092 error = mnt_want_write(nd.path.mnt);
44093 if (error)
44094 goto out_dput;
44095 @@ -2940,6 +3076,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44096 if (error)
44097 goto out_drop_write;
44098 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44099 + if (!error)
44100 + gr_handle_create(new_dentry, nd.path.mnt);
44101 out_drop_write:
44102 mnt_drop_write(nd.path.mnt);
44103 out_dput:
44104 @@ -3117,6 +3255,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44105 char *to;
44106 int error;
44107
44108 + pax_track_stack();
44109 +
44110 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44111 if (error)
44112 goto exit;
44113 @@ -3173,6 +3313,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44114 if (new_dentry == trap)
44115 goto exit5;
44116
44117 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44118 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44119 + to);
44120 + if (error)
44121 + goto exit5;
44122 +
44123 error = mnt_want_write(oldnd.path.mnt);
44124 if (error)
44125 goto exit5;
44126 @@ -3182,6 +3328,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44127 goto exit6;
44128 error = vfs_rename(old_dir->d_inode, old_dentry,
44129 new_dir->d_inode, new_dentry);
44130 + if (!error)
44131 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44132 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44133 exit6:
44134 mnt_drop_write(oldnd.path.mnt);
44135 exit5:
44136 @@ -3207,6 +3356,8 @@ SYSCALL_DEFINE2(rename, const char __use
44137
44138 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44139 {
44140 + char tmpbuf[64];
44141 + const char *newlink;
44142 int len;
44143
44144 len = PTR_ERR(link);
44145 @@ -3216,7 +3367,14 @@ int vfs_readlink(struct dentry *dentry,
44146 len = strlen(link);
44147 if (len > (unsigned) buflen)
44148 len = buflen;
44149 - if (copy_to_user(buffer, link, len))
44150 +
44151 + if (len < sizeof(tmpbuf)) {
44152 + memcpy(tmpbuf, link, len);
44153 + newlink = tmpbuf;
44154 + } else
44155 + newlink = link;
44156 +
44157 + if (copy_to_user(buffer, newlink, len))
44158 len = -EFAULT;
44159 out:
44160 return len;
44161 diff -urNp linux-3.0.7/fs/namespace.c linux-3.0.7/fs/namespace.c
44162 --- linux-3.0.7/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
44163 +++ linux-3.0.7/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
44164 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
44165 if (!(sb->s_flags & MS_RDONLY))
44166 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44167 up_write(&sb->s_umount);
44168 +
44169 + gr_log_remount(mnt->mnt_devname, retval);
44170 +
44171 return retval;
44172 }
44173
44174 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
44175 br_write_unlock(vfsmount_lock);
44176 up_write(&namespace_sem);
44177 release_mounts(&umount_list);
44178 +
44179 + gr_log_unmount(mnt->mnt_devname, retval);
44180 +
44181 return retval;
44182 }
44183
44184 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
44185 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44186 MS_STRICTATIME);
44187
44188 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44189 + retval = -EPERM;
44190 + goto dput_out;
44191 + }
44192 +
44193 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44194 + retval = -EPERM;
44195 + goto dput_out;
44196 + }
44197 +
44198 if (flags & MS_REMOUNT)
44199 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44200 data_page);
44201 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
44202 dev_name, data_page);
44203 dput_out:
44204 path_put(&path);
44205 +
44206 + gr_log_mount(dev_name, dir_name, retval);
44207 +
44208 return retval;
44209 }
44210
44211 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44212 if (error)
44213 goto out2;
44214
44215 + if (gr_handle_chroot_pivot()) {
44216 + error = -EPERM;
44217 + goto out2;
44218 + }
44219 +
44220 get_fs_root(current->fs, &root);
44221 error = lock_mount(&old);
44222 if (error)
44223 diff -urNp linux-3.0.7/fs/ncpfs/dir.c linux-3.0.7/fs/ncpfs/dir.c
44224 --- linux-3.0.7/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44225 +++ linux-3.0.7/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44226 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44227 int res, val = 0, len;
44228 __u8 __name[NCP_MAXPATHLEN + 1];
44229
44230 + pax_track_stack();
44231 +
44232 if (dentry == dentry->d_sb->s_root)
44233 return 1;
44234
44235 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44236 int error, res, len;
44237 __u8 __name[NCP_MAXPATHLEN + 1];
44238
44239 + pax_track_stack();
44240 +
44241 error = -EIO;
44242 if (!ncp_conn_valid(server))
44243 goto finished;
44244 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44245 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44246 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44247
44248 + pax_track_stack();
44249 +
44250 ncp_age_dentry(server, dentry);
44251 len = sizeof(__name);
44252 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44253 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44254 int error, len;
44255 __u8 __name[NCP_MAXPATHLEN + 1];
44256
44257 + pax_track_stack();
44258 +
44259 DPRINTK("ncp_mkdir: making %s/%s\n",
44260 dentry->d_parent->d_name.name, dentry->d_name.name);
44261
44262 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44263 int old_len, new_len;
44264 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44265
44266 + pax_track_stack();
44267 +
44268 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44269 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44270 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44271 diff -urNp linux-3.0.7/fs/ncpfs/inode.c linux-3.0.7/fs/ncpfs/inode.c
44272 --- linux-3.0.7/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44273 +++ linux-3.0.7/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44274 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44275 #endif
44276 struct ncp_entry_info finfo;
44277
44278 + pax_track_stack();
44279 +
44280 memset(&data, 0, sizeof(data));
44281 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44282 if (!server)
44283 diff -urNp linux-3.0.7/fs/nfs/inode.c linux-3.0.7/fs/nfs/inode.c
44284 --- linux-3.0.7/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44285 +++ linux-3.0.7/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
44286 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44287 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44288 nfsi->attrtimeo_timestamp = jiffies;
44289
44290 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44291 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44292 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44293 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44294 else
44295 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
44296 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44297 }
44298
44299 -static atomic_long_t nfs_attr_generation_counter;
44300 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44301
44302 static unsigned long nfs_read_attr_generation_counter(void)
44303 {
44304 - return atomic_long_read(&nfs_attr_generation_counter);
44305 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44306 }
44307
44308 unsigned long nfs_inc_attr_generation_counter(void)
44309 {
44310 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44311 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44312 }
44313
44314 void nfs_fattr_init(struct nfs_fattr *fattr)
44315 diff -urNp linux-3.0.7/fs/nfsd/nfs4state.c linux-3.0.7/fs/nfsd/nfs4state.c
44316 --- linux-3.0.7/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
44317 +++ linux-3.0.7/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
44318 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44319 unsigned int strhashval;
44320 int err;
44321
44322 + pax_track_stack();
44323 +
44324 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44325 (long long) lock->lk_offset,
44326 (long long) lock->lk_length);
44327 diff -urNp linux-3.0.7/fs/nfsd/nfs4xdr.c linux-3.0.7/fs/nfsd/nfs4xdr.c
44328 --- linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
44329 +++ linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
44330 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44331 .dentry = dentry,
44332 };
44333
44334 + pax_track_stack();
44335 +
44336 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44337 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44338 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44339 diff -urNp linux-3.0.7/fs/nfsd/vfs.c linux-3.0.7/fs/nfsd/vfs.c
44340 --- linux-3.0.7/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
44341 +++ linux-3.0.7/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
44342 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44343 } else {
44344 oldfs = get_fs();
44345 set_fs(KERNEL_DS);
44346 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44347 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44348 set_fs(oldfs);
44349 }
44350
44351 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44352
44353 /* Write the data. */
44354 oldfs = get_fs(); set_fs(KERNEL_DS);
44355 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44356 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44357 set_fs(oldfs);
44358 if (host_err < 0)
44359 goto out_nfserr;
44360 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44361 */
44362
44363 oldfs = get_fs(); set_fs(KERNEL_DS);
44364 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44365 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44366 set_fs(oldfs);
44367
44368 if (host_err < 0)
44369 diff -urNp linux-3.0.7/fs/notify/fanotify/fanotify_user.c linux-3.0.7/fs/notify/fanotify/fanotify_user.c
44370 --- linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
44371 +++ linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
44372 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44373 goto out_close_fd;
44374
44375 ret = -EFAULT;
44376 - if (copy_to_user(buf, &fanotify_event_metadata,
44377 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44378 + copy_to_user(buf, &fanotify_event_metadata,
44379 fanotify_event_metadata.event_len))
44380 goto out_kill_access_response;
44381
44382 diff -urNp linux-3.0.7/fs/notify/notification.c linux-3.0.7/fs/notify/notification.c
44383 --- linux-3.0.7/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
44384 +++ linux-3.0.7/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
44385 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44386 * get set to 0 so it will never get 'freed'
44387 */
44388 static struct fsnotify_event *q_overflow_event;
44389 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44390 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44391
44392 /**
44393 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44394 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44395 */
44396 u32 fsnotify_get_cookie(void)
44397 {
44398 - return atomic_inc_return(&fsnotify_sync_cookie);
44399 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44400 }
44401 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44402
44403 diff -urNp linux-3.0.7/fs/ntfs/dir.c linux-3.0.7/fs/ntfs/dir.c
44404 --- linux-3.0.7/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44405 +++ linux-3.0.7/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
44406 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
44407 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44408 ~(s64)(ndir->itype.index.block_size - 1)));
44409 /* Bounds checks. */
44410 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44411 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44412 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44413 "inode 0x%lx or driver bug.", vdir->i_ino);
44414 goto err_out;
44415 diff -urNp linux-3.0.7/fs/ntfs/file.c linux-3.0.7/fs/ntfs/file.c
44416 --- linux-3.0.7/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
44417 +++ linux-3.0.7/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
44418 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
44419 #endif /* NTFS_RW */
44420 };
44421
44422 -const struct file_operations ntfs_empty_file_ops = {};
44423 +const struct file_operations ntfs_empty_file_ops __read_only;
44424
44425 -const struct inode_operations ntfs_empty_inode_ops = {};
44426 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44427 diff -urNp linux-3.0.7/fs/ocfs2/localalloc.c linux-3.0.7/fs/ocfs2/localalloc.c
44428 --- linux-3.0.7/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
44429 +++ linux-3.0.7/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
44430 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44431 goto bail;
44432 }
44433
44434 - atomic_inc(&osb->alloc_stats.moves);
44435 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44436
44437 bail:
44438 if (handle)
44439 diff -urNp linux-3.0.7/fs/ocfs2/namei.c linux-3.0.7/fs/ocfs2/namei.c
44440 --- linux-3.0.7/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
44441 +++ linux-3.0.7/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
44442 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44443 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44444 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44445
44446 + pax_track_stack();
44447 +
44448 /* At some point it might be nice to break this function up a
44449 * bit. */
44450
44451 diff -urNp linux-3.0.7/fs/ocfs2/ocfs2.h linux-3.0.7/fs/ocfs2/ocfs2.h
44452 --- linux-3.0.7/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
44453 +++ linux-3.0.7/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
44454 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
44455
44456 struct ocfs2_alloc_stats
44457 {
44458 - atomic_t moves;
44459 - atomic_t local_data;
44460 - atomic_t bitmap_data;
44461 - atomic_t bg_allocs;
44462 - atomic_t bg_extends;
44463 + atomic_unchecked_t moves;
44464 + atomic_unchecked_t local_data;
44465 + atomic_unchecked_t bitmap_data;
44466 + atomic_unchecked_t bg_allocs;
44467 + atomic_unchecked_t bg_extends;
44468 };
44469
44470 enum ocfs2_local_alloc_state
44471 diff -urNp linux-3.0.7/fs/ocfs2/suballoc.c linux-3.0.7/fs/ocfs2/suballoc.c
44472 --- linux-3.0.7/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
44473 +++ linux-3.0.7/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
44474 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44475 mlog_errno(status);
44476 goto bail;
44477 }
44478 - atomic_inc(&osb->alloc_stats.bg_extends);
44479 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44480
44481 /* You should never ask for this much metadata */
44482 BUG_ON(bits_wanted >
44483 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44484 mlog_errno(status);
44485 goto bail;
44486 }
44487 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44488 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44489
44490 *suballoc_loc = res.sr_bg_blkno;
44491 *suballoc_bit_start = res.sr_bit_offset;
44492 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44493 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44494 res->sr_bits);
44495
44496 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44497 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44498
44499 BUG_ON(res->sr_bits != 1);
44500
44501 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44502 mlog_errno(status);
44503 goto bail;
44504 }
44505 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44506 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44507
44508 BUG_ON(res.sr_bits != 1);
44509
44510 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44511 cluster_start,
44512 num_clusters);
44513 if (!status)
44514 - atomic_inc(&osb->alloc_stats.local_data);
44515 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44516 } else {
44517 if (min_clusters > (osb->bitmap_cpg - 1)) {
44518 /* The only paths asking for contiguousness
44519 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
44520 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44521 res.sr_bg_blkno,
44522 res.sr_bit_offset);
44523 - atomic_inc(&osb->alloc_stats.bitmap_data);
44524 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44525 *num_clusters = res.sr_bits;
44526 }
44527 }
44528 diff -urNp linux-3.0.7/fs/ocfs2/super.c linux-3.0.7/fs/ocfs2/super.c
44529 --- linux-3.0.7/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
44530 +++ linux-3.0.7/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
44531 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44532 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44533 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44534 "Stats",
44535 - atomic_read(&osb->alloc_stats.bitmap_data),
44536 - atomic_read(&osb->alloc_stats.local_data),
44537 - atomic_read(&osb->alloc_stats.bg_allocs),
44538 - atomic_read(&osb->alloc_stats.moves),
44539 - atomic_read(&osb->alloc_stats.bg_extends));
44540 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44541 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44542 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44543 + atomic_read_unchecked(&osb->alloc_stats.moves),
44544 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44545
44546 out += snprintf(buf + out, len - out,
44547 "%10s => State: %u Descriptor: %llu Size: %u bits "
44548 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
44549 spin_lock_init(&osb->osb_xattr_lock);
44550 ocfs2_init_steal_slots(osb);
44551
44552 - atomic_set(&osb->alloc_stats.moves, 0);
44553 - atomic_set(&osb->alloc_stats.local_data, 0);
44554 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44555 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44556 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44557 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44558 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44559 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44560 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44561 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44562
44563 /* Copy the blockcheck stats from the superblock probe */
44564 osb->osb_ecc_stats = *stats;
44565 diff -urNp linux-3.0.7/fs/ocfs2/symlink.c linux-3.0.7/fs/ocfs2/symlink.c
44566 --- linux-3.0.7/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
44567 +++ linux-3.0.7/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
44568 @@ -142,7 +142,7 @@ bail:
44569
44570 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44571 {
44572 - char *link = nd_get_link(nd);
44573 + const char *link = nd_get_link(nd);
44574 if (!IS_ERR(link))
44575 kfree(link);
44576 }
44577 diff -urNp linux-3.0.7/fs/open.c linux-3.0.7/fs/open.c
44578 --- linux-3.0.7/fs/open.c 2011-07-21 22:17:23.000000000 -0400
44579 +++ linux-3.0.7/fs/open.c 2011-09-14 09:16:46.000000000 -0400
44580 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
44581 error = locks_verify_truncate(inode, NULL, length);
44582 if (!error)
44583 error = security_path_truncate(&path);
44584 +
44585 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44586 + error = -EACCES;
44587 +
44588 if (!error)
44589 error = do_truncate(path.dentry, length, 0, NULL);
44590
44591 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44592 if (__mnt_is_readonly(path.mnt))
44593 res = -EROFS;
44594
44595 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44596 + res = -EACCES;
44597 +
44598 out_path_release:
44599 path_put(&path);
44600 out:
44601 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44602 if (error)
44603 goto dput_and_out;
44604
44605 + gr_log_chdir(path.dentry, path.mnt);
44606 +
44607 set_fs_pwd(current->fs, &path);
44608
44609 dput_and_out:
44610 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44611 goto out_putf;
44612
44613 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44614 +
44615 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44616 + error = -EPERM;
44617 +
44618 + if (!error)
44619 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44620 +
44621 if (!error)
44622 set_fs_pwd(current->fs, &file->f_path);
44623 out_putf:
44624 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
44625 if (error)
44626 goto dput_and_out;
44627
44628 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44629 + goto dput_and_out;
44630 +
44631 set_fs_root(current->fs, &path);
44632 +
44633 + gr_handle_chroot_chdir(&path);
44634 +
44635 error = 0;
44636 dput_and_out:
44637 path_put(&path);
44638 @@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44639 err = mnt_want_write_file(file);
44640 if (err)
44641 goto out_putf;
44642 +
44643 mutex_lock(&inode->i_mutex);
44644 +
44645 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
44646 + err = -EACCES;
44647 + goto out_unlock;
44648 + }
44649 +
44650 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
44651 if (err)
44652 goto out_unlock;
44653 if (mode == (mode_t) -1)
44654 mode = inode->i_mode;
44655 +
44656 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
44657 + err = -EACCES;
44658 + goto out_unlock;
44659 + }
44660 +
44661 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44662 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44663 err = notify_change(dentry, &newattrs);
44664 @@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44665 error = mnt_want_write(path.mnt);
44666 if (error)
44667 goto dput_and_out;
44668 +
44669 mutex_lock(&inode->i_mutex);
44670 +
44671 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44672 + error = -EACCES;
44673 + goto out_unlock;
44674 + }
44675 +
44676 error = security_path_chmod(path.dentry, path.mnt, mode);
44677 if (error)
44678 goto out_unlock;
44679 if (mode == (mode_t) -1)
44680 mode = inode->i_mode;
44681 +
44682 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44683 + error = -EACCES;
44684 + goto out_unlock;
44685 + }
44686 +
44687 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44688 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44689 error = notify_change(path.dentry, &newattrs);
44690 @@ -528,6 +576,9 @@ static int chown_common(struct path *pat
44691 int error;
44692 struct iattr newattrs;
44693
44694 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
44695 + return -EACCES;
44696 +
44697 newattrs.ia_valid = ATTR_CTIME;
44698 if (user != (uid_t) -1) {
44699 newattrs.ia_valid |= ATTR_UID;
44700 @@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
44701 if (!IS_ERR(tmp)) {
44702 fd = get_unused_fd_flags(flags);
44703 if (fd >= 0) {
44704 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
44705 + struct file *f;
44706 + /* don't allow this to be set by userland */
44707 + flags &= ~FMODE_GREXEC;
44708 + f = do_filp_open(dfd, tmp, &op, lookup);
44709 if (IS_ERR(f)) {
44710 put_unused_fd(fd);
44711 fd = PTR_ERR(f);
44712 diff -urNp linux-3.0.7/fs/partitions/ldm.c linux-3.0.7/fs/partitions/ldm.c
44713 --- linux-3.0.7/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
44714 +++ linux-3.0.7/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
44715 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44716 ldm_error ("A VBLK claims to have %d parts.", num);
44717 return false;
44718 }
44719 +
44720 if (rec >= num) {
44721 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44722 return false;
44723 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44724 goto found;
44725 }
44726
44727 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44728 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44729 if (!f) {
44730 ldm_crit ("Out of memory.");
44731 return false;
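
The ldm_frag_add() hunk above only swaps the operands of the allocation size, so that the variable product size*num is written first and the constant header size last. A minimal userspace sketch of guarding such a computed allocation size against integer overflow is shown below; the struct layout and the frag_alloc() helper are illustrative only and are not taken from the kernel sources.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical fragment record with a flexible payload, standing in for
 * the VBLK fragment entry that ldm_frag_add() allocates */
struct frag {
    int group, num, rec, map;
    unsigned char data[];
};

static struct frag *frag_alloc(size_t size, size_t num)
{
    size_t bytes;

    /* reject overflow of size * num before the header is added */
    if (num != 0 && size > SIZE_MAX / num)
        return NULL;
    bytes = size * num;
    if (bytes > SIZE_MAX - sizeof(struct frag))
        return NULL;

    return malloc(bytes + sizeof(struct frag));
}

int main(void)
{
    struct frag *f = frag_alloc(512, 4);
    printf("allocation %s\n", f ? "succeeded" : "failed");
    free(f);
    return 0;
}
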
44732 diff -urNp linux-3.0.7/fs/pipe.c linux-3.0.7/fs/pipe.c
44733 --- linux-3.0.7/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
44734 +++ linux-3.0.7/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
44735 @@ -420,9 +420,9 @@ redo:
44736 }
44737 if (bufs) /* More to do? */
44738 continue;
44739 - if (!pipe->writers)
44740 + if (!atomic_read(&pipe->writers))
44741 break;
44742 - if (!pipe->waiting_writers) {
44743 + if (!atomic_read(&pipe->waiting_writers)) {
44744 /* syscall merging: Usually we must not sleep
44745 * if O_NONBLOCK is set, or if we got some data.
44746 * But if a writer sleeps in kernel space, then
44747 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
44748 mutex_lock(&inode->i_mutex);
44749 pipe = inode->i_pipe;
44750
44751 - if (!pipe->readers) {
44752 + if (!atomic_read(&pipe->readers)) {
44753 send_sig(SIGPIPE, current, 0);
44754 ret = -EPIPE;
44755 goto out;
44756 @@ -530,7 +530,7 @@ redo1:
44757 for (;;) {
44758 int bufs;
44759
44760 - if (!pipe->readers) {
44761 + if (!atomic_read(&pipe->readers)) {
44762 send_sig(SIGPIPE, current, 0);
44763 if (!ret)
44764 ret = -EPIPE;
44765 @@ -616,9 +616,9 @@ redo2:
44766 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44767 do_wakeup = 0;
44768 }
44769 - pipe->waiting_writers++;
44770 + atomic_inc(&pipe->waiting_writers);
44771 pipe_wait(pipe);
44772 - pipe->waiting_writers--;
44773 + atomic_dec(&pipe->waiting_writers);
44774 }
44775 out:
44776 mutex_unlock(&inode->i_mutex);
44777 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
44778 mask = 0;
44779 if (filp->f_mode & FMODE_READ) {
44780 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44781 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44782 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44783 mask |= POLLHUP;
44784 }
44785
44786 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
44787 * Most Unices do not set POLLERR for FIFOs but on Linux they
44788 * behave exactly like pipes for poll().
44789 */
44790 - if (!pipe->readers)
44791 + if (!atomic_read(&pipe->readers))
44792 mask |= POLLERR;
44793 }
44794
44795 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
44796
44797 mutex_lock(&inode->i_mutex);
44798 pipe = inode->i_pipe;
44799 - pipe->readers -= decr;
44800 - pipe->writers -= decw;
44801 + atomic_sub(decr, &pipe->readers);
44802 + atomic_sub(decw, &pipe->writers);
44803
44804 - if (!pipe->readers && !pipe->writers) {
44805 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44806 free_pipe_info(inode);
44807 } else {
44808 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
44809 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
44810
44811 if (inode->i_pipe) {
44812 ret = 0;
44813 - inode->i_pipe->readers++;
44814 + atomic_inc(&inode->i_pipe->readers);
44815 }
44816
44817 mutex_unlock(&inode->i_mutex);
44818 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
44819
44820 if (inode->i_pipe) {
44821 ret = 0;
44822 - inode->i_pipe->writers++;
44823 + atomic_inc(&inode->i_pipe->writers);
44824 }
44825
44826 mutex_unlock(&inode->i_mutex);
44827 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
44828 if (inode->i_pipe) {
44829 ret = 0;
44830 if (filp->f_mode & FMODE_READ)
44831 - inode->i_pipe->readers++;
44832 + atomic_inc(&inode->i_pipe->readers);
44833 if (filp->f_mode & FMODE_WRITE)
44834 - inode->i_pipe->writers++;
44835 + atomic_inc(&inode->i_pipe->writers);
44836 }
44837
44838 mutex_unlock(&inode->i_mutex);
44839 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
44840 inode->i_pipe = NULL;
44841 }
44842
44843 -static struct vfsmount *pipe_mnt __read_mostly;
44844 +struct vfsmount *pipe_mnt __read_mostly;
44845
44846 /*
44847 * pipefs_dname() is called from d_path().
44848 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
44849 goto fail_iput;
44850 inode->i_pipe = pipe;
44851
44852 - pipe->readers = pipe->writers = 1;
44853 + atomic_set(&pipe->readers, 1);
44854 + atomic_set(&pipe->writers, 1);
44855 inode->i_fop = &rdwr_pipefifo_fops;
44856
44857 /*
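
The fs/pipe.c hunks above turn the readers, writers and waiting_writers counters into atomic_t values, read with atomic_read() and updated with atomic_inc()/atomic_dec()/atomic_sub(), and drop the static qualifier from pipe_mnt. A rough userspace analogy of the counter pattern, written with C11 atomics, is sketched below; the pipe_like structure and its helpers are invented for illustration and are not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

struct pipe_like {
    atomic_int readers;
    atomic_int writers;
};

static void reader_open(struct pipe_like *p) { atomic_fetch_add(&p->readers, 1); }
static void writer_open(struct pipe_like *p) { atomic_fetch_add(&p->writers, 1); }

static int pipe_release(struct pipe_like *p, int decr, int decw)
{
    atomic_fetch_sub(&p->readers, decr);
    atomic_fetch_sub(&p->writers, decw);
    /* the pipe state may be torn down only when both counts reach zero */
    return atomic_load(&p->readers) == 0 && atomic_load(&p->writers) == 0;
}

int main(void)
{
    struct pipe_like p;

    atomic_init(&p.readers, 1);   /* one reader and one writer to start with */
    atomic_init(&p.writers, 1);
    reader_open(&p);
    writer_open(&p);
    printf("tear down now? %d\n", pipe_release(&p, 2, 2));
    return 0;
}
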
44858 diff -urNp linux-3.0.7/fs/proc/array.c linux-3.0.7/fs/proc/array.c
44859 --- linux-3.0.7/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
44860 +++ linux-3.0.7/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
44861 @@ -60,6 +60,7 @@
44862 #include <linux/tty.h>
44863 #include <linux/string.h>
44864 #include <linux/mman.h>
44865 +#include <linux/grsecurity.h>
44866 #include <linux/proc_fs.h>
44867 #include <linux/ioport.h>
44868 #include <linux/uaccess.h>
44869 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
44870 seq_putc(m, '\n');
44871 }
44872
44873 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44874 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44875 +{
44876 + if (p->mm)
44877 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44878 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44879 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44880 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44881 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44882 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44883 + else
44884 + seq_printf(m, "PaX:\t-----\n");
44885 +}
44886 +#endif
44887 +
44888 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44889 struct pid *pid, struct task_struct *task)
44890 {
44891 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
44892 task_cpus_allowed(m, task);
44893 cpuset_task_status_allowed(m, task);
44894 task_context_switch_counts(m, task);
44895 +
44896 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44897 + task_pax(m, task);
44898 +#endif
44899 +
44900 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44901 + task_grsec_rbac(m, task);
44902 +#endif
44903 +
44904 return 0;
44905 }
44906
44907 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44908 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44909 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44910 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44911 +#endif
44912 +
44913 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44914 struct pid *pid, struct task_struct *task, int whole)
44915 {
44916 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
44917 cputime_t cutime, cstime, utime, stime;
44918 cputime_t cgtime, gtime;
44919 unsigned long rsslim = 0;
44920 - char tcomm[sizeof(task->comm)];
44921 + char tcomm[sizeof(task->comm)] = { 0 };
44922 unsigned long flags;
44923
44924 + pax_track_stack();
44925 +
44926 state = *get_task_state(task);
44927 vsize = eip = esp = 0;
44928 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44929 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
44930 gtime = task->gtime;
44931 }
44932
44933 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44934 + if (PAX_RAND_FLAGS(mm)) {
44935 + eip = 0;
44936 + esp = 0;
44937 + wchan = 0;
44938 + }
44939 +#endif
44940 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44941 + wchan = 0;
44942 + eip = 0;
44943 + esp = 0;
44944 +#endif
44945 +
44946 /* scale priority and nice values from timeslices to -20..20 */
44947 /* to make it look like a "normal" Unix priority/nice value */
44948 priority = task_prio(task);
44949 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
44950 vsize,
44951 mm ? get_mm_rss(mm) : 0,
44952 rsslim,
44953 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44954 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44955 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44956 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44957 +#else
44958 mm ? (permitted ? mm->start_code : 1) : 0,
44959 mm ? (permitted ? mm->end_code : 1) : 0,
44960 (permitted && mm) ? mm->start_stack : 0,
44961 +#endif
44962 esp,
44963 eip,
44964 /* The signal information here is obsolete.
44965 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
44966
44967 return 0;
44968 }
44969 +
44970 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44971 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44972 +{
44973 + u32 curr_ip = 0;
44974 + unsigned long flags;
44975 +
44976 + if (lock_task_sighand(task, &flags)) {
44977 + curr_ip = task->signal->curr_ip;
44978 + unlock_task_sighand(task, &flags);
44979 + }
44980 +
44981 + return sprintf(buffer, "%pI4\n", &curr_ip);
44982 +}
44983 +#endif
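
The task_pax() helper added above prints a five-character summary line to /proc/<pid>/status, one letter per PaX flag, upper-case when the flag is set and lower-case when it is clear, or "-----" when the task has no mm. The formatting idiom is easy to reproduce in isolation; the sketch below uses made-up flag values rather than the kernel's MF_PAX_* constants.

#include <stdio.h>

/* illustrative flag bits, not the kernel's MF_PAX_* values */
#define F_PAGEEXEC 0x01
#define F_EMUTRAMP 0x02
#define F_MPROTECT 0x04
#define F_RANDMMAP 0x08
#define F_SEGMEXEC 0x10

static void print_pax_line(unsigned long flags)
{
    printf("PaX:\t%c%c%c%c%c\n",
           flags & F_PAGEEXEC ? 'P' : 'p',
           flags & F_EMUTRAMP ? 'E' : 'e',
           flags & F_MPROTECT ? 'M' : 'm',
           flags & F_RANDMMAP ? 'R' : 'r',
           flags & F_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
    print_pax_line(F_PAGEEXEC | F_MPROTECT | F_RANDMMAP);  /* prints "PaX: PeMRs" */
    return 0;
}
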
44984 diff -urNp linux-3.0.7/fs/proc/base.c linux-3.0.7/fs/proc/base.c
44985 --- linux-3.0.7/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
44986 +++ linux-3.0.7/fs/proc/base.c 2011-10-17 06:38:10.000000000 -0400
44987 @@ -107,6 +107,22 @@ struct pid_entry {
44988 union proc_op op;
44989 };
44990
44991 +struct getdents_callback {
44992 + struct linux_dirent __user * current_dir;
44993 + struct linux_dirent __user * previous;
44994 + struct file * file;
44995 + int count;
44996 + int error;
44997 +};
44998 +
44999 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45000 + loff_t offset, u64 ino, unsigned int d_type)
45001 +{
45002 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45003 + buf->error = -EINVAL;
45004 + return 0;
45005 +}
45006 +
45007 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45008 .name = (NAME), \
45009 .len = sizeof(NAME) - 1, \
45010 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45011 if (task == current)
45012 return mm;
45013
45014 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45015 + return ERR_PTR(-EPERM);
45016 +
45017 /*
45018 * If current is actively ptrace'ing, and would also be
45019 * permitted to freshly attach with ptrace now, permit it.
45020 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45021 if (!mm->arg_end)
45022 goto out_mm; /* Shh! No looking before we're done */
45023
45024 + if (gr_acl_handle_procpidmem(task))
45025 + goto out_mm;
45026 +
45027 len = mm->arg_end - mm->arg_start;
45028
45029 if (len > PAGE_SIZE)
45030 @@ -309,12 +331,28 @@ out:
45031 return res;
45032 }
45033
45034 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45035 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45036 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45037 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45038 +#endif
45039 +
45040 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45041 {
45042 struct mm_struct *mm = mm_for_maps(task);
45043 int res = PTR_ERR(mm);
45044 if (mm && !IS_ERR(mm)) {
45045 unsigned int nwords = 0;
45046 +
45047 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45048 + /* allow if we're currently ptracing this task */
45049 + if (PAX_RAND_FLAGS(mm) &&
45050 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45051 + mmput(mm);
45052 + return 0;
45053 + }
45054 +#endif
45055 +
45056 do {
45057 nwords += 2;
45058 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45059 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45060 }
45061
45062
45063 -#ifdef CONFIG_KALLSYMS
45064 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45065 /*
45066 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45067 * Returns the resolved symbol. If that fails, simply return the address.
45068 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45069 mutex_unlock(&task->signal->cred_guard_mutex);
45070 }
45071
45072 -#ifdef CONFIG_STACKTRACE
45073 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45074
45075 #define MAX_STACK_TRACE_DEPTH 64
45076
45077 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45078 return count;
45079 }
45080
45081 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45082 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45083 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45084 {
45085 long nr;
45086 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45087 /************************************************************************/
45088
45089 /* permission checks */
45090 -static int proc_fd_access_allowed(struct inode *inode)
45091 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45092 {
45093 struct task_struct *task;
45094 int allowed = 0;
45095 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45096 */
45097 task = get_proc_task(inode);
45098 if (task) {
45099 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45100 + if (log)
45101 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45102 + else
45103 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45104 put_task_struct(task);
45105 }
45106 return allowed;
45107 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45108 if (!task)
45109 goto out_no_task;
45110
45111 + if (gr_acl_handle_procpidmem(task))
45112 + goto out;
45113 +
45114 ret = -ENOMEM;
45115 page = (char *)__get_free_page(GFP_TEMPORARY);
45116 if (!page)
45117 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
45118 path_put(&nd->path);
45119
45120 /* Are we allowed to snoop on the tasks file descriptors? */
45121 - if (!proc_fd_access_allowed(inode))
45122 + if (!proc_fd_access_allowed(inode,0))
45123 goto out;
45124
45125 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45126 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
45127 struct path path;
45128
45129 /* Are we allowed to snoop on the tasks file descriptors? */
45130 - if (!proc_fd_access_allowed(inode))
45131 - goto out;
45132 + /* logging this is needed for learning on chromium to work properly,
45133 + but we don't want to flood the logs from 'ps', which does a readlink
45134 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45135 + CAP_SYS_PTRACE, as it's not necessary for its basic functionality
45136 + */
45137 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45138 + if (!proc_fd_access_allowed(inode,0))
45139 + goto out;
45140 + } else {
45141 + if (!proc_fd_access_allowed(inode,1))
45142 + goto out;
45143 + }
45144
45145 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45146 if (error)
45147 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
45148 rcu_read_lock();
45149 cred = __task_cred(task);
45150 inode->i_uid = cred->euid;
45151 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45152 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45153 +#else
45154 inode->i_gid = cred->egid;
45155 +#endif
45156 rcu_read_unlock();
45157 }
45158 security_task_to_inode(task, inode);
45159 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
45160 struct inode *inode = dentry->d_inode;
45161 struct task_struct *task;
45162 const struct cred *cred;
45163 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45164 + const struct cred *tmpcred = current_cred();
45165 +#endif
45166
45167 generic_fillattr(inode, stat);
45168
45169 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
45170 stat->uid = 0;
45171 stat->gid = 0;
45172 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45173 +
45174 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45175 + rcu_read_unlock();
45176 + return -ENOENT;
45177 + }
45178 +
45179 if (task) {
45180 + cred = __task_cred(task);
45181 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45182 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45183 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45184 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45185 +#endif
45186 + ) {
45187 +#endif
45188 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45189 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45190 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45191 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45192 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45193 +#endif
45194 task_dumpable(task)) {
45195 - cred = __task_cred(task);
45196 stat->uid = cred->euid;
45197 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45198 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45199 +#else
45200 stat->gid = cred->egid;
45201 +#endif
45202 + }
45203 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45204 + } else {
45205 + rcu_read_unlock();
45206 + return -ENOENT;
45207 }
45208 +#endif
45209 }
45210 rcu_read_unlock();
45211 return 0;
45212 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
45213
45214 if (task) {
45215 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45216 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45217 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45218 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45219 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45220 +#endif
45221 task_dumpable(task)) {
45222 rcu_read_lock();
45223 cred = __task_cred(task);
45224 inode->i_uid = cred->euid;
45225 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45226 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45227 +#else
45228 inode->i_gid = cred->egid;
45229 +#endif
45230 rcu_read_unlock();
45231 } else {
45232 inode->i_uid = 0;
45233 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
45234 int fd = proc_fd(inode);
45235
45236 if (task) {
45237 - files = get_files_struct(task);
45238 + if (!gr_acl_handle_procpidmem(task))
45239 + files = get_files_struct(task);
45240 put_task_struct(task);
45241 }
45242 if (files) {
45243 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
45244 */
45245 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
45246 {
45247 + struct task_struct *task;
45248 int rv = generic_permission(inode, mask, flags, NULL);
45249 - if (rv == 0)
45250 - return 0;
45251 +
45252 if (task_pid(current) == proc_pid(inode))
45253 rv = 0;
45254 +
45255 + task = get_proc_task(inode);
45256 + if (task == NULL)
45257 + return rv;
45258 +
45259 + if (gr_acl_handle_procpidmem(task))
45260 + rv = -EACCES;
45261 +
45262 + put_task_struct(task);
45263 +
45264 return rv;
45265 }
45266
45267 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
45268 if (!task)
45269 goto out_no_task;
45270
45271 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45272 + goto out;
45273 +
45274 /*
45275 * Yes, it does not scale. And it should not. Don't add
45276 * new entries into /proc/<tgid>/ without very good reasons.
45277 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
45278 if (!task)
45279 goto out_no_task;
45280
45281 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45282 + goto out;
45283 +
45284 ret = 0;
45285 i = filp->f_pos;
45286 switch (i) {
45287 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
45288 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45289 void *cookie)
45290 {
45291 - char *s = nd_get_link(nd);
45292 + const char *s = nd_get_link(nd);
45293 if (!IS_ERR(s))
45294 __putname(s);
45295 }
45296 @@ -2656,6 +2771,9 @@ static struct dentry *proc_base_instanti
45297 if (p->fop)
45298 inode->i_fop = p->fop;
45299 ei->op = p->op;
45300 +
45301 + gr_handle_proc_create(dentry, inode);
45302 +
45303 d_add(dentry, inode);
45304 error = NULL;
45305 out:
45306 @@ -2795,7 +2913,7 @@ static const struct pid_entry tgid_base_
45307 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45308 #endif
45309 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45310 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45311 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45312 INF("syscall", S_IRUGO, proc_pid_syscall),
45313 #endif
45314 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45315 @@ -2820,10 +2938,10 @@ static const struct pid_entry tgid_base_
45316 #ifdef CONFIG_SECURITY
45317 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45318 #endif
45319 -#ifdef CONFIG_KALLSYMS
45320 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45321 INF("wchan", S_IRUGO, proc_pid_wchan),
45322 #endif
45323 -#ifdef CONFIG_STACKTRACE
45324 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45325 ONE("stack", S_IRUGO, proc_pid_stack),
45326 #endif
45327 #ifdef CONFIG_SCHEDSTATS
45328 @@ -2857,6 +2975,9 @@ static const struct pid_entry tgid_base_
45329 #ifdef CONFIG_HARDWALL
45330 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45331 #endif
45332 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45333 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45334 +#endif
45335 };
45336
45337 static int proc_tgid_base_readdir(struct file * filp,
45338 @@ -2982,7 +3103,14 @@ static struct dentry *proc_pid_instantia
45339 if (!inode)
45340 goto out;
45341
45342 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45343 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45344 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45345 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45346 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45347 +#else
45348 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45349 +#endif
45350 inode->i_op = &proc_tgid_base_inode_operations;
45351 inode->i_fop = &proc_tgid_base_operations;
45352 inode->i_flags|=S_IMMUTABLE;
45353 @@ -3024,7 +3152,14 @@ struct dentry *proc_pid_lookup(struct in
45354 if (!task)
45355 goto out;
45356
45357 + if (!has_group_leader_pid(task))
45358 + goto out_put_task;
45359 +
45360 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45361 + goto out_put_task;
45362 +
45363 result = proc_pid_instantiate(dir, dentry, task, NULL);
45364 +out_put_task:
45365 put_task_struct(task);
45366 out:
45367 return result;
45368 @@ -3089,6 +3224,11 @@ int proc_pid_readdir(struct file * filp,
45369 {
45370 unsigned int nr;
45371 struct task_struct *reaper;
45372 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45373 + const struct cred *tmpcred = current_cred();
45374 + const struct cred *itercred;
45375 +#endif
45376 + filldir_t __filldir = filldir;
45377 struct tgid_iter iter;
45378 struct pid_namespace *ns;
45379
45380 @@ -3112,8 +3252,27 @@ int proc_pid_readdir(struct file * filp,
45381 for (iter = next_tgid(ns, iter);
45382 iter.task;
45383 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45384 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45385 + rcu_read_lock();
45386 + itercred = __task_cred(iter.task);
45387 +#endif
45388 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45389 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45390 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45391 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45392 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45393 +#endif
45394 + )
45395 +#endif
45396 + )
45397 + __filldir = &gr_fake_filldir;
45398 + else
45399 + __filldir = filldir;
45400 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45401 + rcu_read_unlock();
45402 +#endif
45403 filp->f_pos = iter.tgid + TGID_OFFSET;
45404 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45405 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45406 put_task_struct(iter.task);
45407 goto out;
45408 }
45409 @@ -3141,7 +3300,7 @@ static const struct pid_entry tid_base_s
45410 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45411 #endif
45412 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45413 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45414 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45415 INF("syscall", S_IRUGO, proc_pid_syscall),
45416 #endif
45417 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45418 @@ -3165,10 +3324,10 @@ static const struct pid_entry tid_base_s
45419 #ifdef CONFIG_SECURITY
45420 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45421 #endif
45422 -#ifdef CONFIG_KALLSYMS
45423 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45424 INF("wchan", S_IRUGO, proc_pid_wchan),
45425 #endif
45426 -#ifdef CONFIG_STACKTRACE
45427 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45428 ONE("stack", S_IRUGO, proc_pid_stack),
45429 #endif
45430 #ifdef CONFIG_SCHEDSTATS
45431 diff -urNp linux-3.0.7/fs/proc/cmdline.c linux-3.0.7/fs/proc/cmdline.c
45432 --- linux-3.0.7/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
45433 +++ linux-3.0.7/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
45434 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45435
45436 static int __init proc_cmdline_init(void)
45437 {
45438 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45439 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45440 +#else
45441 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45442 +#endif
45443 return 0;
45444 }
45445 module_init(proc_cmdline_init);
45446 diff -urNp linux-3.0.7/fs/proc/devices.c linux-3.0.7/fs/proc/devices.c
45447 --- linux-3.0.7/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
45448 +++ linux-3.0.7/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
45449 @@ -64,7 +64,11 @@ static const struct file_operations proc
45450
45451 static int __init proc_devices_init(void)
45452 {
45453 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45454 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45455 +#else
45456 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45457 +#endif
45458 return 0;
45459 }
45460 module_init(proc_devices_init);
45461 diff -urNp linux-3.0.7/fs/proc/inode.c linux-3.0.7/fs/proc/inode.c
45462 --- linux-3.0.7/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
45463 +++ linux-3.0.7/fs/proc/inode.c 2011-10-17 06:56:02.000000000 -0400
45464 @@ -18,6 +18,7 @@
45465 #include <linux/module.h>
45466 #include <linux/sysctl.h>
45467 #include <linux/slab.h>
45468 +#include <linux/grsecurity.h>
45469
45470 #include <asm/system.h>
45471 #include <asm/uaccess.h>
45472 @@ -102,10 +103,16 @@ void __init proc_init_inodecache(void)
45473 init_once);
45474 }
45475
45476 +static int proc_drop_inode(struct inode *inode)
45477 +{
45478 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45479 + return generic_delete_inode(inode);
45480 +}
45481 +
45482 static const struct super_operations proc_sops = {
45483 .alloc_inode = proc_alloc_inode,
45484 .destroy_inode = proc_destroy_inode,
45485 - .drop_inode = generic_delete_inode,
45486 + .drop_inode = proc_drop_inode,
45487 .evict_inode = proc_evict_inode,
45488 .statfs = simple_statfs,
45489 };
45490 @@ -440,7 +447,11 @@ struct inode *proc_get_inode(struct supe
45491 if (de->mode) {
45492 inode->i_mode = de->mode;
45493 inode->i_uid = de->uid;
45494 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45495 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45496 +#else
45497 inode->i_gid = de->gid;
45498 +#endif
45499 }
45500 if (de->size)
45501 inode->i_size = de->size;
45502 diff -urNp linux-3.0.7/fs/proc/internal.h linux-3.0.7/fs/proc/internal.h
45503 --- linux-3.0.7/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
45504 +++ linux-3.0.7/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
45505 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45506 struct pid *pid, struct task_struct *task);
45507 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45508 struct pid *pid, struct task_struct *task);
45509 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45510 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45511 +#endif
45512 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45513
45514 extern const struct file_operations proc_maps_operations;
45515 diff -urNp linux-3.0.7/fs/proc/Kconfig linux-3.0.7/fs/proc/Kconfig
45516 --- linux-3.0.7/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
45517 +++ linux-3.0.7/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
45518 @@ -30,12 +30,12 @@ config PROC_FS
45519
45520 config PROC_KCORE
45521 bool "/proc/kcore support" if !ARM
45522 - depends on PROC_FS && MMU
45523 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45524
45525 config PROC_VMCORE
45526 bool "/proc/vmcore support"
45527 - depends on PROC_FS && CRASH_DUMP
45528 - default y
45529 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45530 + default n
45531 help
45532 Exports the dump image of crashed kernel in ELF format.
45533
45534 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45535 limited in memory.
45536
45537 config PROC_PAGE_MONITOR
45538 - default y
45539 - depends on PROC_FS && MMU
45540 + default n
45541 + depends on PROC_FS && MMU && !GRKERNSEC
45542 bool "Enable /proc page monitoring" if EXPERT
45543 help
45544 Various /proc files exist to monitor process memory utilization:
45545 diff -urNp linux-3.0.7/fs/proc/kcore.c linux-3.0.7/fs/proc/kcore.c
45546 --- linux-3.0.7/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
45547 +++ linux-3.0.7/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
45548 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
45549 off_t offset = 0;
45550 struct kcore_list *m;
45551
45552 + pax_track_stack();
45553 +
45554 /* setup ELF header */
45555 elf = (struct elfhdr *) bufp;
45556 bufp += sizeof(struct elfhdr);
45557 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
45558 * the addresses in the elf_phdr on our list.
45559 */
45560 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45561 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45562 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45563 + if (tsz > buflen)
45564 tsz = buflen;
45565 -
45566 +
45567 while (buflen) {
45568 struct kcore_list *m;
45569
45570 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
45571 kfree(elf_buf);
45572 } else {
45573 if (kern_addr_valid(start)) {
45574 - unsigned long n;
45575 + char *elf_buf;
45576 + mm_segment_t oldfs;
45577
45578 - n = copy_to_user(buffer, (char *)start, tsz);
45579 - /*
45580 - * We cannot distingush between fault on source
45581 - * and fault on destination. When this happens
45582 - * we clear too and hope it will trigger the
45583 - * EFAULT again.
45584 - */
45585 - if (n) {
45586 - if (clear_user(buffer + tsz - n,
45587 - n))
45588 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45589 + if (!elf_buf)
45590 + return -ENOMEM;
45591 + oldfs = get_fs();
45592 + set_fs(KERNEL_DS);
45593 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45594 + set_fs(oldfs);
45595 + if (copy_to_user(buffer, elf_buf, tsz)) {
45596 + kfree(elf_buf);
45597 return -EFAULT;
45598 + }
45599 }
45600 + set_fs(oldfs);
45601 + kfree(elf_buf);
45602 } else {
45603 if (clear_user(buffer, tsz))
45604 return -EFAULT;
45605 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
45606
45607 static int open_kcore(struct inode *inode, struct file *filp)
45608 {
45609 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45610 + return -EPERM;
45611 +#endif
45612 if (!capable(CAP_SYS_RAWIO))
45613 return -EPERM;
45614 if (kcore_need_update)
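
The read_kcore() hunk above replaces the direct copy_to_user() from a kernel virtual address with a staged copy: the data is first pulled into a kmalloc'd buffer under set_fs(KERNEL_DS), and only then copied out to the caller, so a fault on the source side is handled separately from a fault on the destination buffer. A loose userspace analogy of the bounce-buffer pattern follows; malloc() and memcpy() merely stand in for the kernel primitives named in the hunk.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_via_bounce(void *dst, const void *src, size_t len)
{
    char *tmp = malloc(len);

    if (!tmp)
        return -1;              /* the kernel version returns -ENOMEM here */
    memcpy(tmp, src, len);      /* stands in for __copy_from_user() of the source */
    memcpy(dst, tmp, len);      /* stands in for copy_to_user() to the caller */
    free(tmp);
    return 0;
}

int main(void)
{
    char src[] = "kcore bytes", dst[sizeof(src)];

    if (copy_via_bounce(dst, src, sizeof(src)) == 0)
        printf("%s\n", dst);
    return 0;
}
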
45615 diff -urNp linux-3.0.7/fs/proc/meminfo.c linux-3.0.7/fs/proc/meminfo.c
45616 --- linux-3.0.7/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
45617 +++ linux-3.0.7/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
45618 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45619 unsigned long pages[NR_LRU_LISTS];
45620 int lru;
45621
45622 + pax_track_stack();
45623 +
45624 /*
45625 * display in kilobytes.
45626 */
45627 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
45628 vmi.used >> 10,
45629 vmi.largest_chunk >> 10
45630 #ifdef CONFIG_MEMORY_FAILURE
45631 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45632 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45633 #endif
45634 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
45635 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
45636 diff -urNp linux-3.0.7/fs/proc/nommu.c linux-3.0.7/fs/proc/nommu.c
45637 --- linux-3.0.7/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
45638 +++ linux-3.0.7/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
45639 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
45640 if (len < 1)
45641 len = 1;
45642 seq_printf(m, "%*c", len, ' ');
45643 - seq_path(m, &file->f_path, "");
45644 + seq_path(m, &file->f_path, "\n\\");
45645 }
45646
45647 seq_putc(m, '\n');
45648 diff -urNp linux-3.0.7/fs/proc/proc_net.c linux-3.0.7/fs/proc/proc_net.c
45649 --- linux-3.0.7/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
45650 +++ linux-3.0.7/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
45651 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
45652 struct task_struct *task;
45653 struct nsproxy *ns;
45654 struct net *net = NULL;
45655 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45656 + const struct cred *cred = current_cred();
45657 +#endif
45658 +
45659 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45660 + if (cred->fsuid)
45661 + return net;
45662 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45663 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45664 + return net;
45665 +#endif
45666
45667 rcu_read_lock();
45668 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45669 diff -urNp linux-3.0.7/fs/proc/proc_sysctl.c linux-3.0.7/fs/proc/proc_sysctl.c
45670 --- linux-3.0.7/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
45671 +++ linux-3.0.7/fs/proc/proc_sysctl.c 2011-10-17 06:39:12.000000000 -0400
45672 @@ -8,6 +8,8 @@
45673 #include <linux/namei.h>
45674 #include "internal.h"
45675
45676 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45677 +
45678 static const struct dentry_operations proc_sys_dentry_operations;
45679 static const struct file_operations proc_sys_file_operations;
45680 static const struct inode_operations proc_sys_inode_operations;
45681 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
45682 if (!p)
45683 goto out;
45684
45685 + if (gr_handle_sysctl(p, MAY_EXEC))
45686 + goto out;
45687 +
45688 err = ERR_PTR(-ENOMEM);
45689 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45690 if (h)
45691 @@ -121,6 +126,9 @@ static struct dentry *proc_sys_lookup(st
45692
45693 err = NULL;
45694 d_set_d_op(dentry, &proc_sys_dentry_operations);
45695 +
45696 + gr_handle_proc_create(dentry, inode);
45697 +
45698 d_add(dentry, inode);
45699
45700 out:
45701 @@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
45702 return -ENOMEM;
45703 } else {
45704 d_set_d_op(child, &proc_sys_dentry_operations);
45705 +
45706 + gr_handle_proc_create(child, inode);
45707 +
45708 d_add(child, inode);
45709 }
45710 } else {
45711 @@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
45712 if (*pos < file->f_pos)
45713 continue;
45714
45715 + if (gr_handle_sysctl(table, 0))
45716 + continue;
45717 +
45718 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45719 if (res)
45720 return res;
45721 @@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
45722 if (IS_ERR(head))
45723 return PTR_ERR(head);
45724
45725 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45726 + return -ENOENT;
45727 +
45728 generic_fillattr(inode, stat);
45729 if (table)
45730 stat->mode = (stat->mode & S_IFMT) | table->mode;
45731 diff -urNp linux-3.0.7/fs/proc/root.c linux-3.0.7/fs/proc/root.c
45732 --- linux-3.0.7/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
45733 +++ linux-3.0.7/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
45734 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
45735 #ifdef CONFIG_PROC_DEVICETREE
45736 proc_device_tree_init();
45737 #endif
45738 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45739 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45740 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45741 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45742 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45743 +#endif
45744 +#else
45745 proc_mkdir("bus", NULL);
45746 +#endif
45747 proc_sys_init();
45748 }
45749
45750 diff -urNp linux-3.0.7/fs/proc/task_mmu.c linux-3.0.7/fs/proc/task_mmu.c
45751 --- linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:54:54.000000000 -0400
45752 +++ linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:55:28.000000000 -0400
45753 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
45754 "VmExe:\t%8lu kB\n"
45755 "VmLib:\t%8lu kB\n"
45756 "VmPTE:\t%8lu kB\n"
45757 - "VmSwap:\t%8lu kB\n",
45758 - hiwater_vm << (PAGE_SHIFT-10),
45759 + "VmSwap:\t%8lu kB\n"
45760 +
45761 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45762 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45763 +#endif
45764 +
45765 + ,hiwater_vm << (PAGE_SHIFT-10),
45766 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45767 mm->locked_vm << (PAGE_SHIFT-10),
45768 hiwater_rss << (PAGE_SHIFT-10),
45769 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
45770 data << (PAGE_SHIFT-10),
45771 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45772 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
45773 - swap << (PAGE_SHIFT-10));
45774 + swap << (PAGE_SHIFT-10)
45775 +
45776 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45777 + , mm->context.user_cs_base, mm->context.user_cs_limit
45778 +#endif
45779 +
45780 + );
45781 }
45782
45783 unsigned long task_vsize(struct mm_struct *mm)
45784 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
45785 return ret;
45786 }
45787
45788 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45789 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45790 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45791 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45792 +#endif
45793 +
45794 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45795 {
45796 struct mm_struct *mm = vma->vm_mm;
45797 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
45798 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45799 }
45800
45801 - /* We don't show the stack guard page in /proc/maps */
45802 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45803 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
45804 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
45805 +#else
45806 start = vma->vm_start;
45807 - if (stack_guard_page_start(vma, start))
45808 - start += PAGE_SIZE;
45809 end = vma->vm_end;
45810 - if (stack_guard_page_end(vma, end))
45811 - end -= PAGE_SIZE;
45812 +#endif
45813
45814 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45815 start,
45816 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
45817 flags & VM_WRITE ? 'w' : '-',
45818 flags & VM_EXEC ? 'x' : '-',
45819 flags & VM_MAYSHARE ? 's' : 'p',
45820 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45821 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45822 +#else
45823 pgoff,
45824 +#endif
45825 MAJOR(dev), MINOR(dev), ino, &len);
45826
45827 /*
45828 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
45829 */
45830 if (file) {
45831 pad_len_spaces(m, len);
45832 - seq_path(m, &file->f_path, "\n");
45833 + seq_path(m, &file->f_path, "\n\\");
45834 } else {
45835 const char *name = arch_vma_name(vma);
45836 if (!name) {
45837 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
45838 if (vma->vm_start <= mm->brk &&
45839 vma->vm_end >= mm->start_brk) {
45840 name = "[heap]";
45841 - } else if (vma->vm_start <= mm->start_stack &&
45842 - vma->vm_end >= mm->start_stack) {
45843 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45844 + (vma->vm_start <= mm->start_stack &&
45845 + vma->vm_end >= mm->start_stack)) {
45846 name = "[stack]";
45847 }
45848 } else {
45849 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
45850 };
45851
45852 memset(&mss, 0, sizeof mss);
45853 - mss.vma = vma;
45854 - /* mmap_sem is held in m_start */
45855 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45856 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45857 -
45858 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45859 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45860 +#endif
45861 + mss.vma = vma;
45862 + /* mmap_sem is held in m_start */
45863 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45864 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45865 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45866 + }
45867 +#endif
45868 show_map_vma(m, vma);
45869
45870 seq_printf(m,
45871 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
45872 "KernelPageSize: %8lu kB\n"
45873 "MMUPageSize: %8lu kB\n"
45874 "Locked: %8lu kB\n",
45875 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45876 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45877 +#else
45878 (vma->vm_end - vma->vm_start) >> 10,
45879 +#endif
45880 mss.resident >> 10,
45881 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45882 mss.shared_clean >> 10,
45883 @@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
45884
45885 if (file) {
45886 seq_printf(m, " file=");
45887 - seq_path(m, &file->f_path, "\n\t= ");
45888 + seq_path(m, &file->f_path, "\n\t\\= ");
45889 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
45890 seq_printf(m, " heap");
45891 } else if (vma->vm_start <= mm->start_stack &&
45892 diff -urNp linux-3.0.7/fs/proc/task_nommu.c linux-3.0.7/fs/proc/task_nommu.c
45893 --- linux-3.0.7/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
45894 +++ linux-3.0.7/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
45895 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
45896 else
45897 bytes += kobjsize(mm);
45898
45899 - if (current->fs && current->fs->users > 1)
45900 + if (current->fs && atomic_read(&current->fs->users) > 1)
45901 sbytes += kobjsize(current->fs);
45902 else
45903 bytes += kobjsize(current->fs);
45904 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
45905
45906 if (file) {
45907 pad_len_spaces(m, len);
45908 - seq_path(m, &file->f_path, "");
45909 + seq_path(m, &file->f_path, "\n\\");
45910 } else if (mm) {
45911 if (vma->vm_start <= mm->start_stack &&
45912 vma->vm_end >= mm->start_stack) {
45913 diff -urNp linux-3.0.7/fs/quota/netlink.c linux-3.0.7/fs/quota/netlink.c
45914 --- linux-3.0.7/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
45915 +++ linux-3.0.7/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
45916 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
45917 void quota_send_warning(short type, unsigned int id, dev_t dev,
45918 const char warntype)
45919 {
45920 - static atomic_t seq;
45921 + static atomic_unchecked_t seq;
45922 struct sk_buff *skb;
45923 void *msg_head;
45924 int ret;
45925 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
45926 "VFS: Not enough memory to send quota warning.\n");
45927 return;
45928 }
45929 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
45930 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
45931 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
45932 if (!msg_head) {
45933 printk(KERN_ERR
45934 diff -urNp linux-3.0.7/fs/readdir.c linux-3.0.7/fs/readdir.c
45935 --- linux-3.0.7/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
45936 +++ linux-3.0.7/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
45937 @@ -17,6 +17,7 @@
45938 #include <linux/security.h>
45939 #include <linux/syscalls.h>
45940 #include <linux/unistd.h>
45941 +#include <linux/namei.h>
45942
45943 #include <asm/uaccess.h>
45944
45945 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45946
45947 struct readdir_callback {
45948 struct old_linux_dirent __user * dirent;
45949 + struct file * file;
45950 int result;
45951 };
45952
45953 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45954 buf->result = -EOVERFLOW;
45955 return -EOVERFLOW;
45956 }
45957 +
45958 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45959 + return 0;
45960 +
45961 buf->result++;
45962 dirent = buf->dirent;
45963 if (!access_ok(VERIFY_WRITE, dirent,
45964 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45965
45966 buf.result = 0;
45967 buf.dirent = dirent;
45968 + buf.file = file;
45969
45970 error = vfs_readdir(file, fillonedir, &buf);
45971 if (buf.result)
45972 @@ -142,6 +149,7 @@ struct linux_dirent {
45973 struct getdents_callback {
45974 struct linux_dirent __user * current_dir;
45975 struct linux_dirent __user * previous;
45976 + struct file * file;
45977 int count;
45978 int error;
45979 };
45980 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
45981 buf->error = -EOVERFLOW;
45982 return -EOVERFLOW;
45983 }
45984 +
45985 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45986 + return 0;
45987 +
45988 dirent = buf->previous;
45989 if (dirent) {
45990 if (__put_user(offset, &dirent->d_off))
45991 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45992 buf.previous = NULL;
45993 buf.count = count;
45994 buf.error = 0;
45995 + buf.file = file;
45996
45997 error = vfs_readdir(file, filldir, &buf);
45998 if (error >= 0)
45999 @@ -229,6 +242,7 @@ out:
46000 struct getdents_callback64 {
46001 struct linux_dirent64 __user * current_dir;
46002 struct linux_dirent64 __user * previous;
46003 + struct file *file;
46004 int count;
46005 int error;
46006 };
46007 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46008 buf->error = -EINVAL; /* only used if we fail.. */
46009 if (reclen > buf->count)
46010 return -EINVAL;
46011 +
46012 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46013 + return 0;
46014 +
46015 dirent = buf->previous;
46016 if (dirent) {
46017 if (__put_user(offset, &dirent->d_off))
46018 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46019
46020 buf.current_dir = dirent;
46021 buf.previous = NULL;
46022 + buf.file = file;
46023 buf.count = count;
46024 buf.error = 0;
46025
46026 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46027 error = buf.error;
46028 lastdirent = buf.previous;
46029 if (lastdirent) {
46030 - typeof(lastdirent->d_off) d_off = file->f_pos;
46031 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46032 if (__put_user(d_off, &lastdirent->d_off))
46033 error = -EFAULT;
46034 else
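
Each of the fs/readdir.c hunks above threads the struct file into the callback context and lets gr_acl_handle_filldir() veto an entry by returning 0 from the filldir callback, which silently drops that entry from the listing. The same filtering shape is easy to show in userspace; the predicate below (hide dot-files) is a stand-in policy chosen for illustration, not what the grsecurity hook actually checks.

#include <dirent.h>
#include <stdio.h>

/* stand-in for gr_acl_handle_filldir(): nonzero means the entry may be shown */
static int entry_allowed(const char *name)
{
    return name[0] != '.';
}

int main(void)
{
    DIR *d = opendir(".");
    struct dirent *ent;

    if (!d)
        return 1;
    while ((ent = readdir(d)) != NULL) {
        if (!entry_allowed(ent->d_name))
            continue;           /* skipped, just like a vetoed filldir call */
        puts(ent->d_name);
    }
    closedir(d);
    return 0;
}
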
46035 diff -urNp linux-3.0.7/fs/reiserfs/dir.c linux-3.0.7/fs/reiserfs/dir.c
46036 --- linux-3.0.7/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
46037 +++ linux-3.0.7/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
46038 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46039 struct reiserfs_dir_entry de;
46040 int ret = 0;
46041
46042 + pax_track_stack();
46043 +
46044 reiserfs_write_lock(inode->i_sb);
46045
46046 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46047 diff -urNp linux-3.0.7/fs/reiserfs/do_balan.c linux-3.0.7/fs/reiserfs/do_balan.c
46048 --- linux-3.0.7/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
46049 +++ linux-3.0.7/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
46050 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46051 return;
46052 }
46053
46054 - atomic_inc(&(fs_generation(tb->tb_sb)));
46055 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46056 do_balance_starts(tb);
46057
46058 /* balance leaf returns 0 except if combining L R and S into
46059 diff -urNp linux-3.0.7/fs/reiserfs/journal.c linux-3.0.7/fs/reiserfs/journal.c
46060 --- linux-3.0.7/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
46061 +++ linux-3.0.7/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
46062 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
46063 struct buffer_head *bh;
46064 int i, j;
46065
46066 + pax_track_stack();
46067 +
46068 bh = __getblk(dev, block, bufsize);
46069 if (buffer_uptodate(bh))
46070 return (bh);
46071 diff -urNp linux-3.0.7/fs/reiserfs/namei.c linux-3.0.7/fs/reiserfs/namei.c
46072 --- linux-3.0.7/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
46073 +++ linux-3.0.7/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
46074 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46075 unsigned long savelink = 1;
46076 struct timespec ctime;
46077
46078 + pax_track_stack();
46079 +
46080 /* three balancings: (1) old name removal, (2) new name insertion
46081 and (3) maybe "save" link insertion
46082 stat data updates: (1) old directory,
46083 diff -urNp linux-3.0.7/fs/reiserfs/procfs.c linux-3.0.7/fs/reiserfs/procfs.c
46084 --- linux-3.0.7/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
46085 +++ linux-3.0.7/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
46086 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46087 "SMALL_TAILS " : "NO_TAILS ",
46088 replay_only(sb) ? "REPLAY_ONLY " : "",
46089 convert_reiserfs(sb) ? "CONV " : "",
46090 - atomic_read(&r->s_generation_counter),
46091 + atomic_read_unchecked(&r->s_generation_counter),
46092 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46093 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46094 SF(s_good_search_by_key_reada), SF(s_bmaps),
46095 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46096 struct journal_params *jp = &rs->s_v1.s_journal;
46097 char b[BDEVNAME_SIZE];
46098
46099 + pax_track_stack();
46100 +
46101 seq_printf(m, /* on-disk fields */
46102 "jp_journal_1st_block: \t%i\n"
46103 "jp_journal_dev: \t%s[%x]\n"
46104 diff -urNp linux-3.0.7/fs/reiserfs/stree.c linux-3.0.7/fs/reiserfs/stree.c
46105 --- linux-3.0.7/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
46106 +++ linux-3.0.7/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
46107 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46108 int iter = 0;
46109 #endif
46110
46111 + pax_track_stack();
46112 +
46113 BUG_ON(!th->t_trans_id);
46114
46115 init_tb_struct(th, &s_del_balance, sb, path,
46116 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46117 int retval;
46118 int quota_cut_bytes = 0;
46119
46120 + pax_track_stack();
46121 +
46122 BUG_ON(!th->t_trans_id);
46123
46124 le_key2cpu_key(&cpu_key, key);
46125 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46126 int quota_cut_bytes;
46127 loff_t tail_pos = 0;
46128
46129 + pax_track_stack();
46130 +
46131 BUG_ON(!th->t_trans_id);
46132
46133 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46134 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46135 int retval;
46136 int fs_gen;
46137
46138 + pax_track_stack();
46139 +
46140 BUG_ON(!th->t_trans_id);
46141
46142 fs_gen = get_generation(inode->i_sb);
46143 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46144 int fs_gen = 0;
46145 int quota_bytes = 0;
46146
46147 + pax_track_stack();
46148 +
46149 BUG_ON(!th->t_trans_id);
46150
46151 if (inode) { /* Do we count quotas for item? */
46152 diff -urNp linux-3.0.7/fs/reiserfs/super.c linux-3.0.7/fs/reiserfs/super.c
46153 --- linux-3.0.7/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
46154 +++ linux-3.0.7/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
46155 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46156 {.option_name = NULL}
46157 };
46158
46159 + pax_track_stack();
46160 +
46161 *blocks = 0;
46162 if (!options || !*options)
46163 /* use default configuration: create tails, journaling on, no
46164 diff -urNp linux-3.0.7/fs/select.c linux-3.0.7/fs/select.c
46165 --- linux-3.0.7/fs/select.c 2011-07-21 22:17:23.000000000 -0400
46166 +++ linux-3.0.7/fs/select.c 2011-08-23 21:48:14.000000000 -0400
46167 @@ -20,6 +20,7 @@
46168 #include <linux/module.h>
46169 #include <linux/slab.h>
46170 #include <linux/poll.h>
46171 +#include <linux/security.h>
46172 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46173 #include <linux/file.h>
46174 #include <linux/fdtable.h>
46175 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46176 int retval, i, timed_out = 0;
46177 unsigned long slack = 0;
46178
46179 + pax_track_stack();
46180 +
46181 rcu_read_lock();
46182 retval = max_select_fd(n, fds);
46183 rcu_read_unlock();
46184 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46185 /* Allocate small arguments on the stack to save memory and be faster */
46186 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46187
46188 + pax_track_stack();
46189 +
46190 ret = -EINVAL;
46191 if (n < 0)
46192 goto out_nofds;
46193 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46194 struct poll_list *walk = head;
46195 unsigned long todo = nfds;
46196
46197 + pax_track_stack();
46198 +
46199 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46200 if (nfds > rlimit(RLIMIT_NOFILE))
46201 return -EINVAL;
46202
46203 diff -urNp linux-3.0.7/fs/seq_file.c linux-3.0.7/fs/seq_file.c
46204 --- linux-3.0.7/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
46205 +++ linux-3.0.7/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
46206 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46207 return 0;
46208 }
46209 if (!m->buf) {
46210 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46211 + m->size = PAGE_SIZE;
46212 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46213 if (!m->buf)
46214 return -ENOMEM;
46215 }
46216 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46217 Eoverflow:
46218 m->op->stop(m, p);
46219 kfree(m->buf);
46220 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46221 + m->size <<= 1;
46222 + m->buf = kmalloc(m->size, GFP_KERNEL);
46223 return !m->buf ? -ENOMEM : -EAGAIN;
46224 }
46225
46226 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46227 m->version = file->f_version;
46228 /* grab buffer if we didn't have one */
46229 if (!m->buf) {
46230 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46231 + m->size = PAGE_SIZE;
46232 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46233 if (!m->buf)
46234 goto Enomem;
46235 }
46236 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46237 goto Fill;
46238 m->op->stop(m, p);
46239 kfree(m->buf);
46240 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46241 + m->size <<= 1;
46242 + m->buf = kmalloc(m->size, GFP_KERNEL);
46243 if (!m->buf)
46244 goto Enomem;
46245 m->count = 0;
46246 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46247 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46248 void *data)
46249 {
46250 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46251 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46252 int res = -ENOMEM;
46253
46254 if (op) {
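
The seq_file hunks above rewrite allocations such as kmalloc(m->size <<= 1, ...) so that the size update happens in its own statement before the call; the behaviour is unchanged, but the size field is no longer modified inside an argument expression, which reads more clearly and cooperates better with instrumentation that inspects allocation sizes. The same transformation in a standalone C sketch (malloc in place of kmalloc):

#include <stdlib.h>

struct growbuf {
	char   *buf;
	size_t  size;
};

static int growbuf_double(struct growbuf *g)
{
	free(g->buf);			/* old contents are discarded, as traverse() does */
	g->size <<= 1;			/* update the size in its own statement ... */
	g->buf = malloc(g->size);	/* ... then allocate with the new size */
	return g->buf ? 0 : -1;
}

int main(void)
{
	struct growbuf g = { malloc(64), 64 };

	if (!g.buf)
		return 1;
	if (growbuf_double(&g) != 0)
		return 1;
	free(g.buf);
	return g.size == 128 ? 0 : 1;
}
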
46255 diff -urNp linux-3.0.7/fs/splice.c linux-3.0.7/fs/splice.c
46256 --- linux-3.0.7/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
46257 +++ linux-3.0.7/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
46258 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46259 pipe_lock(pipe);
46260
46261 for (;;) {
46262 - if (!pipe->readers) {
46263 + if (!atomic_read(&pipe->readers)) {
46264 send_sig(SIGPIPE, current, 0);
46265 if (!ret)
46266 ret = -EPIPE;
46267 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46268 do_wakeup = 0;
46269 }
46270
46271 - pipe->waiting_writers++;
46272 + atomic_inc(&pipe->waiting_writers);
46273 pipe_wait(pipe);
46274 - pipe->waiting_writers--;
46275 + atomic_dec(&pipe->waiting_writers);
46276 }
46277
46278 pipe_unlock(pipe);
46279 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46280 .spd_release = spd_release_page,
46281 };
46282
46283 + pax_track_stack();
46284 +
46285 if (splice_grow_spd(pipe, &spd))
46286 return -ENOMEM;
46287
46288 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46289 old_fs = get_fs();
46290 set_fs(get_ds());
46291 /* The cast to a user pointer is valid due to the set_fs() */
46292 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46293 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46294 set_fs(old_fs);
46295
46296 return res;
46297 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46298 old_fs = get_fs();
46299 set_fs(get_ds());
46300 /* The cast to a user pointer is valid due to the set_fs() */
46301 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46302 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46303 set_fs(old_fs);
46304
46305 return res;
46306 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46307 .spd_release = spd_release_page,
46308 };
46309
46310 + pax_track_stack();
46311 +
46312 if (splice_grow_spd(pipe, &spd))
46313 return -ENOMEM;
46314
46315 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46316 goto err;
46317
46318 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46319 - vec[i].iov_base = (void __user *) page_address(page);
46320 + vec[i].iov_base = (void __force_user *) page_address(page);
46321 vec[i].iov_len = this_len;
46322 spd.pages[i] = page;
46323 spd.nr_pages++;
46324 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46325 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46326 {
46327 while (!pipe->nrbufs) {
46328 - if (!pipe->writers)
46329 + if (!atomic_read(&pipe->writers))
46330 return 0;
46331
46332 - if (!pipe->waiting_writers && sd->num_spliced)
46333 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46334 return 0;
46335
46336 if (sd->flags & SPLICE_F_NONBLOCK)
46337 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46338 * out of the pipe right after the splice_to_pipe(). So set
46339 * PIPE_READERS appropriately.
46340 */
46341 - pipe->readers = 1;
46342 + atomic_set(&pipe->readers, 1);
46343
46344 current->splice_pipe = pipe;
46345 }
46346 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46347 };
46348 long ret;
46349
46350 + pax_track_stack();
46351 +
46352 pipe = get_pipe_info(file);
46353 if (!pipe)
46354 return -EBADF;
46355 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46356 ret = -ERESTARTSYS;
46357 break;
46358 }
46359 - if (!pipe->writers)
46360 + if (!atomic_read(&pipe->writers))
46361 break;
46362 - if (!pipe->waiting_writers) {
46363 + if (!atomic_read(&pipe->waiting_writers)) {
46364 if (flags & SPLICE_F_NONBLOCK) {
46365 ret = -EAGAIN;
46366 break;
46367 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46368 pipe_lock(pipe);
46369
46370 while (pipe->nrbufs >= pipe->buffers) {
46371 - if (!pipe->readers) {
46372 + if (!atomic_read(&pipe->readers)) {
46373 send_sig(SIGPIPE, current, 0);
46374 ret = -EPIPE;
46375 break;
46376 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46377 ret = -ERESTARTSYS;
46378 break;
46379 }
46380 - pipe->waiting_writers++;
46381 + atomic_inc(&pipe->waiting_writers);
46382 pipe_wait(pipe);
46383 - pipe->waiting_writers--;
46384 + atomic_dec(&pipe->waiting_writers);
46385 }
46386
46387 pipe_unlock(pipe);
46388 @@ -1819,14 +1825,14 @@ retry:
46389 pipe_double_lock(ipipe, opipe);
46390
46391 do {
46392 - if (!opipe->readers) {
46393 + if (!atomic_read(&opipe->readers)) {
46394 send_sig(SIGPIPE, current, 0);
46395 if (!ret)
46396 ret = -EPIPE;
46397 break;
46398 }
46399
46400 - if (!ipipe->nrbufs && !ipipe->writers)
46401 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46402 break;
46403
46404 /*
46405 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46406 pipe_double_lock(ipipe, opipe);
46407
46408 do {
46409 - if (!opipe->readers) {
46410 + if (!atomic_read(&opipe->readers)) {
46411 send_sig(SIGPIPE, current, 0);
46412 if (!ret)
46413 ret = -EPIPE;
46414 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46415 * return EAGAIN if we have the potential of some data in the
46416 * future, otherwise just return 0
46417 */
46418 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46419 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46420 ret = -EAGAIN;
46421
46422 pipe_unlock(ipipe);
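
The splice.c hunks above follow the PaX REFCOUNT convention of turning the pipe's readers, writers and waiting_writers counters into atomic_t and replacing direct ++/-- and plain reads with atomic_inc(), atomic_dec() and atomic_read(). A user-space analogue of the same conversion, using C11 <stdatomic.h> instead of the kernel's atomic_t API (the struct below is a simplified stand-in, not the kernel's pipe_inode_info):

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-in for the pipe bookkeeping touched by the patch. */
struct pipe_counts {
	atomic_int readers;
	atomic_int writers;
	atomic_int waiting_writers;
};

static void writer_waits(struct pipe_counts *p)
{
	atomic_fetch_add(&p->waiting_writers, 1);	/* was: pipe->waiting_writers++ */
	/* ... the real code sleeps in pipe_wait() here ... */
	atomic_fetch_sub(&p->waiting_writers, 1);	/* was: pipe->waiting_writers-- */
}

static int pipe_has_readers(struct pipe_counts *p)
{
	return atomic_load(&p->readers) != 0;		/* was: if (!pipe->readers) */
}

int main(void)
{
	struct pipe_counts p = { 1, 1, 0 };

	writer_waits(&p);
	printf("readers present: %d, waiting writers: %d\n",
	       pipe_has_readers(&p), atomic_load(&p.waiting_writers));
	return 0;
}
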
46423 diff -urNp linux-3.0.7/fs/sysfs/file.c linux-3.0.7/fs/sysfs/file.c
46424 --- linux-3.0.7/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
46425 +++ linux-3.0.7/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
46426 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46427
46428 struct sysfs_open_dirent {
46429 atomic_t refcnt;
46430 - atomic_t event;
46431 + atomic_unchecked_t event;
46432 wait_queue_head_t poll;
46433 struct list_head buffers; /* goes through sysfs_buffer.list */
46434 };
46435 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46436 if (!sysfs_get_active(attr_sd))
46437 return -ENODEV;
46438
46439 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46440 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46441 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46442
46443 sysfs_put_active(attr_sd);
46444 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46445 return -ENOMEM;
46446
46447 atomic_set(&new_od->refcnt, 0);
46448 - atomic_set(&new_od->event, 1);
46449 + atomic_set_unchecked(&new_od->event, 1);
46450 init_waitqueue_head(&new_od->poll);
46451 INIT_LIST_HEAD(&new_od->buffers);
46452 goto retry;
46453 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46454
46455 sysfs_put_active(attr_sd);
46456
46457 - if (buffer->event != atomic_read(&od->event))
46458 + if (buffer->event != atomic_read_unchecked(&od->event))
46459 goto trigger;
46460
46461 return DEFAULT_POLLMASK;
46462 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46463
46464 od = sd->s_attr.open;
46465 if (od) {
46466 - atomic_inc(&od->event);
46467 + atomic_inc_unchecked(&od->event);
46468 wake_up_interruptible(&od->poll);
46469 }
46470
46471 diff -urNp linux-3.0.7/fs/sysfs/mount.c linux-3.0.7/fs/sysfs/mount.c
46472 --- linux-3.0.7/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
46473 +++ linux-3.0.7/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
46474 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46475 .s_name = "",
46476 .s_count = ATOMIC_INIT(1),
46477 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46478 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46479 + .s_mode = S_IFDIR | S_IRWXU,
46480 +#else
46481 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46482 +#endif
46483 .s_ino = 1,
46484 };
46485
46486 diff -urNp linux-3.0.7/fs/sysfs/symlink.c linux-3.0.7/fs/sysfs/symlink.c
46487 --- linux-3.0.7/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
46488 +++ linux-3.0.7/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
46489 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46490
46491 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46492 {
46493 - char *page = nd_get_link(nd);
46494 + const char *page = nd_get_link(nd);
46495 if (!IS_ERR(page))
46496 free_page((unsigned long)page);
46497 }
46498 diff -urNp linux-3.0.7/fs/udf/inode.c linux-3.0.7/fs/udf/inode.c
46499 --- linux-3.0.7/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
46500 +++ linux-3.0.7/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
46501 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46502 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46503 int lastblock = 0;
46504
46505 + pax_track_stack();
46506 +
46507 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46508 prev_epos.block = iinfo->i_location;
46509 prev_epos.bh = NULL;
46510 diff -urNp linux-3.0.7/fs/udf/misc.c linux-3.0.7/fs/udf/misc.c
46511 --- linux-3.0.7/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
46512 +++ linux-3.0.7/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
46513 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46514
46515 u8 udf_tag_checksum(const struct tag *t)
46516 {
46517 - u8 *data = (u8 *)t;
46518 + const u8 *data = (const u8 *)t;
46519 u8 checksum = 0;
46520 int i;
46521 for (i = 0; i < sizeof(struct tag); ++i)
46522 diff -urNp linux-3.0.7/fs/utimes.c linux-3.0.7/fs/utimes.c
46523 --- linux-3.0.7/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
46524 +++ linux-3.0.7/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
46525 @@ -1,6 +1,7 @@
46526 #include <linux/compiler.h>
46527 #include <linux/file.h>
46528 #include <linux/fs.h>
46529 +#include <linux/security.h>
46530 #include <linux/linkage.h>
46531 #include <linux/mount.h>
46532 #include <linux/namei.h>
46533 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46534 goto mnt_drop_write_and_out;
46535 }
46536 }
46537 +
46538 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46539 + error = -EACCES;
46540 + goto mnt_drop_write_and_out;
46541 + }
46542 +
46543 mutex_lock(&inode->i_mutex);
46544 error = notify_change(path->dentry, &newattrs);
46545 mutex_unlock(&inode->i_mutex);
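
The utimes.c hunk above inserts a grsecurity RBAC check, gr_acl_handle_utime(), between the existing permission logic and notify_change(); if the policy denies the object, the syscall fails with -EACCES before any attribute change is attempted. A hedged sketch of that hook-before-commit shape (the policy lookup here is a made-up placeholder, not grsecurity's):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical policy check standing in for gr_acl_handle_utime():
 * return nonzero to allow, 0 to deny. */
static int policy_allows_utime(const char *path)
{
	return strncmp(path, "/etc/", 5) != 0;	/* toy rule for the sketch */
}

static int set_times(const char *path)
{
	if (!policy_allows_utime(path))
		return -EACCES;			/* deny before touching the inode */
	printf("updating timestamps on %s\n", path);
	return 0;
}

int main(void)
{
	return set_times("/etc/shadow") == -EACCES ? 0 : 1;
}
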
46546 diff -urNp linux-3.0.7/fs/xattr_acl.c linux-3.0.7/fs/xattr_acl.c
46547 --- linux-3.0.7/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
46548 +++ linux-3.0.7/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
46549 @@ -17,8 +17,8 @@
46550 struct posix_acl *
46551 posix_acl_from_xattr(const void *value, size_t size)
46552 {
46553 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46554 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46555 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46556 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46557 int count;
46558 struct posix_acl *acl;
46559 struct posix_acl_entry *acl_e;
46560 diff -urNp linux-3.0.7/fs/xattr.c linux-3.0.7/fs/xattr.c
46561 --- linux-3.0.7/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
46562 +++ linux-3.0.7/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
46563 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46564 * Extended attribute SET operations
46565 */
46566 static long
46567 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46568 +setxattr(struct path *path, const char __user *name, const void __user *value,
46569 size_t size, int flags)
46570 {
46571 int error;
46572 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
46573 return PTR_ERR(kvalue);
46574 }
46575
46576 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46577 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46578 + error = -EACCES;
46579 + goto out;
46580 + }
46581 +
46582 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46583 +out:
46584 kfree(kvalue);
46585 return error;
46586 }
46587 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46588 return error;
46589 error = mnt_want_write(path.mnt);
46590 if (!error) {
46591 - error = setxattr(path.dentry, name, value, size, flags);
46592 + error = setxattr(&path, name, value, size, flags);
46593 mnt_drop_write(path.mnt);
46594 }
46595 path_put(&path);
46596 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46597 return error;
46598 error = mnt_want_write(path.mnt);
46599 if (!error) {
46600 - error = setxattr(path.dentry, name, value, size, flags);
46601 + error = setxattr(&path, name, value, size, flags);
46602 mnt_drop_write(path.mnt);
46603 }
46604 path_put(&path);
46605 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46606 const void __user *,value, size_t, size, int, flags)
46607 {
46608 struct file *f;
46609 - struct dentry *dentry;
46610 int error = -EBADF;
46611
46612 f = fget(fd);
46613 if (!f)
46614 return error;
46615 - dentry = f->f_path.dentry;
46616 - audit_inode(NULL, dentry);
46617 + audit_inode(NULL, f->f_path.dentry);
46618 error = mnt_want_write_file(f);
46619 if (!error) {
46620 - error = setxattr(dentry, name, value, size, flags);
46621 + error = setxattr(&f->f_path, name, value, size, flags);
46622 mnt_drop_write(f->f_path.mnt);
46623 }
46624 fput(f);
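
The xattr.c hunks above change the internal setxattr() helper to take a struct path instead of a bare dentry, which gives the added gr_acl_handle_setxattr() check access to both the dentry and the vfsmount it was reached through. A minimal sketch of that kind of signature widening (the types and the check are simplified stand-ins):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct dentry   { const char *name; };
struct vfsmount { const char *mnt_point; };
struct path     { struct dentry *dentry; struct vfsmount *mnt; };

/* Hypothetical check in the spirit of gr_acl_handle_setxattr():
 * with a struct path the hook can reason about dentry *and* mount. */
static int acl_allows_setxattr(const struct path *p)
{
	return p->mnt && p->dentry;	/* toy rule for the sketch */
}

/* was: setxattr(struct dentry *d, ...); now the whole path is passed */
static int setxattr_path(const struct path *p, const char *name)
{
	if (!acl_allows_setxattr(p))
		return -EACCES;
	printf("set %s on %s (mounted at %s)\n", name,
	       p->dentry->name, p->mnt->mnt_point);
	return 0;
}

int main(void)
{
	struct dentry d = { "file" };
	struct vfsmount m = { "/" };
	struct path p = { &d, &m };

	return setxattr_path(&p, "user.comment");
}
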
46625 diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c
46626 --- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
46627 +++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
46628 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
46629 xfs_fsop_geom_t fsgeo;
46630 int error;
46631
46632 + memset(&fsgeo, 0, sizeof(fsgeo));
46633 error = xfs_fs_geometry(mp, &fsgeo, 3);
46634 if (error)
46635 return -error;
46636 diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c
46637 --- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
46638 +++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
46639 @@ -128,7 +128,7 @@ xfs_find_handle(
46640 }
46641
46642 error = -EFAULT;
46643 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46644 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46645 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46646 goto out_put;
46647
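
The xfs_find_handle() hunk above adds a sanity check so that copy_to_user() is never asked to copy more than sizeof(handle) bytes out of the on-stack handle, turning a potential kernel stack over-read into an -EFAULT. The same clamp-before-copy idea in a small standalone sketch (hypothetical names, memcpy in place of copy_to_user):

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct handle { uint32_t fsid[2]; uint32_t fid[4]; };

/* Copy at most the real size of the source object; reject anything larger
 * instead of reading past the end of the stack variable. */
static int export_handle(void *dst, size_t dst_len, const struct handle *h,
			 size_t requested)
{
	if (requested > sizeof(*h) || requested > dst_len)
		return -EFAULT;
	memcpy(dst, h, requested);
	return 0;
}

int main(void)
{
	struct handle h = { { 1, 2 }, { 3, 4, 5, 6 } };
	unsigned char out[64];

	if (export_handle(out, sizeof(out), &h, sizeof(h)) != 0)
		return 1;
	return export_handle(out, sizeof(out), &h, 4096) == -EFAULT ? 0 : 1;
}
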
46648 diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c
46649 --- linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
46650 +++ linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
46651 @@ -437,7 +437,7 @@ xfs_vn_put_link(
46652 struct nameidata *nd,
46653 void *p)
46654 {
46655 - char *s = nd_get_link(nd);
46656 + const char *s = nd_get_link(nd);
46657
46658 if (!IS_ERR(s))
46659 kfree(s);
46660 diff -urNp linux-3.0.7/fs/xfs/xfs_bmap.c linux-3.0.7/fs/xfs/xfs_bmap.c
46661 --- linux-3.0.7/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
46662 +++ linux-3.0.7/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
46663 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
46664 int nmap,
46665 int ret_nmap);
46666 #else
46667 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46668 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46669 #endif /* DEBUG */
46670
46671 STATIC int
46672 diff -urNp linux-3.0.7/fs/xfs/xfs_dir2_sf.c linux-3.0.7/fs/xfs/xfs_dir2_sf.c
46673 --- linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
46674 +++ linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
46675 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
46676 }
46677
46678 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46679 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46680 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46681 + char name[sfep->namelen];
46682 + memcpy(name, sfep->name, sfep->namelen);
46683 + if (filldir(dirent, name, sfep->namelen,
46684 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46685 + *offset = off & 0x7fffffff;
46686 + return 0;
46687 + }
46688 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46689 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46690 *offset = off & 0x7fffffff;
46691 return 0;
46692 diff -urNp linux-3.0.7/grsecurity/gracl_alloc.c linux-3.0.7/grsecurity/gracl_alloc.c
46693 --- linux-3.0.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46694 +++ linux-3.0.7/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
46695 @@ -0,0 +1,105 @@
46696 +#include <linux/kernel.h>
46697 +#include <linux/mm.h>
46698 +#include <linux/slab.h>
46699 +#include <linux/vmalloc.h>
46700 +#include <linux/gracl.h>
46701 +#include <linux/grsecurity.h>
46702 +
46703 +static unsigned long alloc_stack_next = 1;
46704 +static unsigned long alloc_stack_size = 1;
46705 +static void **alloc_stack;
46706 +
46707 +static __inline__ int
46708 +alloc_pop(void)
46709 +{
46710 + if (alloc_stack_next == 1)
46711 + return 0;
46712 +
46713 + kfree(alloc_stack[alloc_stack_next - 2]);
46714 +
46715 + alloc_stack_next--;
46716 +
46717 + return 1;
46718 +}
46719 +
46720 +static __inline__ int
46721 +alloc_push(void *buf)
46722 +{
46723 + if (alloc_stack_next >= alloc_stack_size)
46724 + return 1;
46725 +
46726 + alloc_stack[alloc_stack_next - 1] = buf;
46727 +
46728 + alloc_stack_next++;
46729 +
46730 + return 0;
46731 +}
46732 +
46733 +void *
46734 +acl_alloc(unsigned long len)
46735 +{
46736 + void *ret = NULL;
46737 +
46738 + if (!len || len > PAGE_SIZE)
46739 + goto out;
46740 +
46741 + ret = kmalloc(len, GFP_KERNEL);
46742 +
46743 + if (ret) {
46744 + if (alloc_push(ret)) {
46745 + kfree(ret);
46746 + ret = NULL;
46747 + }
46748 + }
46749 +
46750 +out:
46751 + return ret;
46752 +}
46753 +
46754 +void *
46755 +acl_alloc_num(unsigned long num, unsigned long len)
46756 +{
46757 + if (!len || (num > (PAGE_SIZE / len)))
46758 + return NULL;
46759 +
46760 + return acl_alloc(num * len);
46761 +}
46762 +
46763 +void
46764 +acl_free_all(void)
46765 +{
46766 + if (gr_acl_is_enabled() || !alloc_stack)
46767 + return;
46768 +
46769 + while (alloc_pop()) ;
46770 +
46771 + if (alloc_stack) {
46772 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46773 + kfree(alloc_stack);
46774 + else
46775 + vfree(alloc_stack);
46776 + }
46777 +
46778 + alloc_stack = NULL;
46779 + alloc_stack_size = 1;
46780 + alloc_stack_next = 1;
46781 +
46782 + return;
46783 +}
46784 +
46785 +int
46786 +acl_alloc_stack_init(unsigned long size)
46787 +{
46788 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46789 + alloc_stack =
46790 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46791 + else
46792 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46793 +
46794 + alloc_stack_size = size;
46795 +
46796 + if (!alloc_stack)
46797 + return 0;
46798 + else
46799 + return 1;
46800 +}
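
gracl_alloc.c, added in full above, implements a small arena-like allocator for the RBAC policy loader: every acl_alloc() result is pushed onto a pre-sized pointer stack so acl_free_all() can later release the whole policy in one sweep. A user-space sketch of the same push-then-free-all idea (malloc/free in place of kmalloc/vmalloc, simplified sizing and error handling):

#include <stdlib.h>
#include <string.h>

/* Pointer stack that remembers every allocation for bulk teardown. */
static void **alloc_stack;
static unsigned long alloc_stack_size, alloc_stack_next;

static int stack_init(unsigned long size)
{
	alloc_stack = malloc(size * sizeof(void *));
	alloc_stack_size = size;
	alloc_stack_next = 0;
	return alloc_stack != NULL;
}

/* Allocate and record the pointer; fail if the stack is full. */
static void *tracked_alloc(size_t len)
{
	void *p;

	if (!len || alloc_stack_next >= alloc_stack_size)
		return NULL;
	p = malloc(len);
	if (p)
		alloc_stack[alloc_stack_next++] = p;
	return p;
}

/* Release everything recorded since stack_init() in one sweep. */
static void free_all(void)
{
	while (alloc_stack_next)
		free(alloc_stack[--alloc_stack_next]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_stack_size = 0;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	char *a = tracked_alloc(32);
	char *b = tracked_alloc(64);
	if (a)
		strcpy(a, "policy object");
	(void)b;
	free_all();
	return 0;
}
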
46801 diff -urNp linux-3.0.7/grsecurity/gracl.c linux-3.0.7/grsecurity/gracl.c
46802 --- linux-3.0.7/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46803 +++ linux-3.0.7/grsecurity/gracl.c 2011-10-17 06:42:59.000000000 -0400
46804 @@ -0,0 +1,4154 @@
46805 +#include <linux/kernel.h>
46806 +#include <linux/module.h>
46807 +#include <linux/sched.h>
46808 +#include <linux/mm.h>
46809 +#include <linux/file.h>
46810 +#include <linux/fs.h>
46811 +#include <linux/namei.h>
46812 +#include <linux/mount.h>
46813 +#include <linux/tty.h>
46814 +#include <linux/proc_fs.h>
46815 +#include <linux/lglock.h>
46816 +#include <linux/slab.h>
46817 +#include <linux/vmalloc.h>
46818 +#include <linux/types.h>
46819 +#include <linux/sysctl.h>
46820 +#include <linux/netdevice.h>
46821 +#include <linux/ptrace.h>
46822 +#include <linux/gracl.h>
46823 +#include <linux/gralloc.h>
46824 +#include <linux/grsecurity.h>
46825 +#include <linux/grinternal.h>
46826 +#include <linux/pid_namespace.h>
46827 +#include <linux/fdtable.h>
46828 +#include <linux/percpu.h>
46829 +
46830 +#include <asm/uaccess.h>
46831 +#include <asm/errno.h>
46832 +#include <asm/mman.h>
46833 +
46834 +static struct acl_role_db acl_role_set;
46835 +static struct name_db name_set;
46836 +static struct inodev_db inodev_set;
46837 +
46838 +/* for keeping track of userspace pointers used for subjects, so we
46839 + can share references in the kernel as well
46840 +*/
46841 +
46842 +static struct path real_root;
46843 +
46844 +static struct acl_subj_map_db subj_map_set;
46845 +
46846 +static struct acl_role_label *default_role;
46847 +
46848 +static struct acl_role_label *role_list;
46849 +
46850 +static u16 acl_sp_role_value;
46851 +
46852 +extern char *gr_shared_page[4];
46853 +static DEFINE_MUTEX(gr_dev_mutex);
46854 +DEFINE_RWLOCK(gr_inode_lock);
46855 +
46856 +struct gr_arg *gr_usermode;
46857 +
46858 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
46859 +
46860 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46861 +extern void gr_clear_learn_entries(void);
46862 +
46863 +#ifdef CONFIG_GRKERNSEC_RESLOG
46864 +extern void gr_log_resource(const struct task_struct *task,
46865 + const int res, const unsigned long wanted, const int gt);
46866 +#endif
46867 +
46868 +unsigned char *gr_system_salt;
46869 +unsigned char *gr_system_sum;
46870 +
46871 +static struct sprole_pw **acl_special_roles = NULL;
46872 +static __u16 num_sprole_pws = 0;
46873 +
46874 +static struct acl_role_label *kernel_role = NULL;
46875 +
46876 +static unsigned int gr_auth_attempts = 0;
46877 +static unsigned long gr_auth_expires = 0UL;
46878 +
46879 +#ifdef CONFIG_NET
46880 +extern struct vfsmount *sock_mnt;
46881 +#endif
46882 +
46883 +extern struct vfsmount *pipe_mnt;
46884 +extern struct vfsmount *shm_mnt;
46885 +#ifdef CONFIG_HUGETLBFS
46886 +extern struct vfsmount *hugetlbfs_vfsmount;
46887 +#endif
46888 +
46889 +static struct acl_object_label *fakefs_obj_rw;
46890 +static struct acl_object_label *fakefs_obj_rwx;
46891 +
46892 +extern int gr_init_uidset(void);
46893 +extern void gr_free_uidset(void);
46894 +extern void gr_remove_uid(uid_t uid);
46895 +extern int gr_find_uid(uid_t uid);
46896 +
46897 +DECLARE_BRLOCK(vfsmount_lock);
46898 +
46899 +__inline__ int
46900 +gr_acl_is_enabled(void)
46901 +{
46902 + return (gr_status & GR_READY);
46903 +}
46904 +
46905 +#ifdef CONFIG_BTRFS_FS
46906 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46907 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46908 +#endif
46909 +
46910 +static inline dev_t __get_dev(const struct dentry *dentry)
46911 +{
46912 +#ifdef CONFIG_BTRFS_FS
46913 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46914 + return get_btrfs_dev_from_inode(dentry->d_inode);
46915 + else
46916 +#endif
46917 + return dentry->d_inode->i_sb->s_dev;
46918 +}
46919 +
46920 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46921 +{
46922 + return __get_dev(dentry);
46923 +}
46924 +
46925 +static char gr_task_roletype_to_char(struct task_struct *task)
46926 +{
46927 + switch (task->role->roletype &
46928 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46929 + GR_ROLE_SPECIAL)) {
46930 + case GR_ROLE_DEFAULT:
46931 + return 'D';
46932 + case GR_ROLE_USER:
46933 + return 'U';
46934 + case GR_ROLE_GROUP:
46935 + return 'G';
46936 + case GR_ROLE_SPECIAL:
46937 + return 'S';
46938 + }
46939 +
46940 + return 'X';
46941 +}
46942 +
46943 +char gr_roletype_to_char(void)
46944 +{
46945 + return gr_task_roletype_to_char(current);
46946 +}
46947 +
46948 +__inline__ int
46949 +gr_acl_tpe_check(void)
46950 +{
46951 + if (unlikely(!(gr_status & GR_READY)))
46952 + return 0;
46953 + if (current->role->roletype & GR_ROLE_TPE)
46954 + return 1;
46955 + else
46956 + return 0;
46957 +}
46958 +
46959 +int
46960 +gr_handle_rawio(const struct inode *inode)
46961 +{
46962 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46963 + if (inode && S_ISBLK(inode->i_mode) &&
46964 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46965 + !capable(CAP_SYS_RAWIO))
46966 + return 1;
46967 +#endif
46968 + return 0;
46969 +}
46970 +
46971 +static int
46972 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46973 +{
46974 + if (likely(lena != lenb))
46975 + return 0;
46976 +
46977 + return !memcmp(a, b, lena);
46978 +}
46979 +
46980 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
46981 +{
46982 + *buflen -= namelen;
46983 + if (*buflen < 0)
46984 + return -ENAMETOOLONG;
46985 + *buffer -= namelen;
46986 + memcpy(*buffer, str, namelen);
46987 + return 0;
46988 +}
46989 +
46990 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
46991 +{
46992 + return prepend(buffer, buflen, name->name, name->len);
46993 +}
46994 +
46995 +static int prepend_path(const struct path *path, struct path *root,
46996 + char **buffer, int *buflen)
46997 +{
46998 + struct dentry *dentry = path->dentry;
46999 + struct vfsmount *vfsmnt = path->mnt;
47000 + bool slash = false;
47001 + int error = 0;
47002 +
47003 + while (dentry != root->dentry || vfsmnt != root->mnt) {
47004 + struct dentry * parent;
47005 +
47006 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47007 + /* Global root? */
47008 + if (vfsmnt->mnt_parent == vfsmnt) {
47009 + goto out;
47010 + }
47011 + dentry = vfsmnt->mnt_mountpoint;
47012 + vfsmnt = vfsmnt->mnt_parent;
47013 + continue;
47014 + }
47015 + parent = dentry->d_parent;
47016 + prefetch(parent);
47017 + spin_lock(&dentry->d_lock);
47018 + error = prepend_name(buffer, buflen, &dentry->d_name);
47019 + spin_unlock(&dentry->d_lock);
47020 + if (!error)
47021 + error = prepend(buffer, buflen, "/", 1);
47022 + if (error)
47023 + break;
47024 +
47025 + slash = true;
47026 + dentry = parent;
47027 + }
47028 +
47029 +out:
47030 + if (!error && !slash)
47031 + error = prepend(buffer, buflen, "/", 1);
47032 +
47033 + return error;
47034 +}
47035 +
47036 +/* this must be called with vfsmount_lock and rename_lock held */
47037 +
47038 +static char *__our_d_path(const struct path *path, struct path *root,
47039 + char *buf, int buflen)
47040 +{
47041 + char *res = buf + buflen;
47042 + int error;
47043 +
47044 + prepend(&res, &buflen, "\0", 1);
47045 + error = prepend_path(path, root, &res, &buflen);
47046 + if (error)
47047 + return ERR_PTR(error);
47048 +
47049 + return res;
47050 +}
47051 +
47052 +static char *
47053 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47054 +{
47055 + char *retval;
47056 +
47057 + retval = __our_d_path(path, root, buf, buflen);
47058 + if (unlikely(IS_ERR(retval)))
47059 + retval = strcpy(buf, "<path too long>");
47060 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47061 + retval[1] = '\0';
47062 +
47063 + return retval;
47064 +}
47065 +
47066 +static char *
47067 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47068 + char *buf, int buflen)
47069 +{
47070 + struct path path;
47071 + char *res;
47072 +
47073 + path.dentry = (struct dentry *)dentry;
47074 + path.mnt = (struct vfsmount *)vfsmnt;
47075 +
47076 + /* we can use real_root.dentry, real_root.mnt, because this is only called
47077 + by the RBAC system */
47078 + res = gen_full_path(&path, &real_root, buf, buflen);
47079 +
47080 + return res;
47081 +}
47082 +
47083 +static char *
47084 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47085 + char *buf, int buflen)
47086 +{
47087 + char *res;
47088 + struct path path;
47089 + struct path root;
47090 + struct task_struct *reaper = &init_task;
47091 +
47092 + path.dentry = (struct dentry *)dentry;
47093 + path.mnt = (struct vfsmount *)vfsmnt;
47094 +
47095 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47096 + get_fs_root(reaper->fs, &root);
47097 +
47098 + write_seqlock(&rename_lock);
47099 + br_read_lock(vfsmount_lock);
47100 + res = gen_full_path(&path, &root, buf, buflen);
47101 + br_read_unlock(vfsmount_lock);
47102 + write_sequnlock(&rename_lock);
47103 +
47104 + path_put(&root);
47105 + return res;
47106 +}
47107 +
47108 +static char *
47109 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47110 +{
47111 + char *ret;
47112 + write_seqlock(&rename_lock);
47113 + br_read_lock(vfsmount_lock);
47114 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47115 + PAGE_SIZE);
47116 + br_read_unlock(vfsmount_lock);
47117 + write_sequnlock(&rename_lock);
47118 + return ret;
47119 +}
47120 +
47121 +static char *
47122 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47123 +{
47124 + char *ret;
47125 + char *buf;
47126 + int buflen;
47127 +
47128 + write_seqlock(&rename_lock);
47129 + br_read_lock(vfsmount_lock);
47130 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47131 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47132 + buflen = (int)(ret - buf);
47133 + if (buflen >= 5)
47134 + prepend(&ret, &buflen, "/proc", 5);
47135 + else
47136 + ret = strcpy(buf, "<path too long>");
47137 + br_read_unlock(vfsmount_lock);
47138 + write_sequnlock(&rename_lock);
47139 + return ret;
47140 +}
47141 +
47142 +char *
47143 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47144 +{
47145 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47146 + PAGE_SIZE);
47147 +}
47148 +
47149 +char *
47150 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47151 +{
47152 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47153 + PAGE_SIZE);
47154 +}
47155 +
47156 +char *
47157 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47158 +{
47159 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47160 + PAGE_SIZE);
47161 +}
47162 +
47163 +char *
47164 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47165 +{
47166 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47167 + PAGE_SIZE);
47168 +}
47169 +
47170 +char *
47171 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47172 +{
47173 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47174 + PAGE_SIZE);
47175 +}
47176 +
47177 +__inline__ __u32
47178 +to_gr_audit(const __u32 reqmode)
47179 +{
47180 + /* masks off auditable permission flags, then shifts them to create
47181 + auditing flags, and adds the special case of append auditing if
47182 + we're requesting write */
47183 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47184 +}
47185 +
47186 +struct acl_subject_label *
47187 +lookup_subject_map(const struct acl_subject_label *userp)
47188 +{
47189 + unsigned int index = shash(userp, subj_map_set.s_size);
47190 + struct subject_map *match;
47191 +
47192 + match = subj_map_set.s_hash[index];
47193 +
47194 + while (match && match->user != userp)
47195 + match = match->next;
47196 +
47197 + if (match != NULL)
47198 + return match->kernel;
47199 + else
47200 + return NULL;
47201 +}
47202 +
47203 +static void
47204 +insert_subj_map_entry(struct subject_map *subjmap)
47205 +{
47206 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47207 + struct subject_map **curr;
47208 +
47209 + subjmap->prev = NULL;
47210 +
47211 + curr = &subj_map_set.s_hash[index];
47212 + if (*curr != NULL)
47213 + (*curr)->prev = subjmap;
47214 +
47215 + subjmap->next = *curr;
47216 + *curr = subjmap;
47217 +
47218 + return;
47219 +}
47220 +
47221 +static struct acl_role_label *
47222 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47223 + const gid_t gid)
47224 +{
47225 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47226 + struct acl_role_label *match;
47227 + struct role_allowed_ip *ipp;
47228 + unsigned int x;
47229 + u32 curr_ip = task->signal->curr_ip;
47230 +
47231 + task->signal->saved_ip = curr_ip;
47232 +
47233 + match = acl_role_set.r_hash[index];
47234 +
47235 + while (match) {
47236 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47237 + for (x = 0; x < match->domain_child_num; x++) {
47238 + if (match->domain_children[x] == uid)
47239 + goto found;
47240 + }
47241 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47242 + break;
47243 + match = match->next;
47244 + }
47245 +found:
47246 + if (match == NULL) {
47247 + try_group:
47248 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47249 + match = acl_role_set.r_hash[index];
47250 +
47251 + while (match) {
47252 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47253 + for (x = 0; x < match->domain_child_num; x++) {
47254 + if (match->domain_children[x] == gid)
47255 + goto found2;
47256 + }
47257 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47258 + break;
47259 + match = match->next;
47260 + }
47261 +found2:
47262 + if (match == NULL)
47263 + match = default_role;
47264 + if (match->allowed_ips == NULL)
47265 + return match;
47266 + else {
47267 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47268 + if (likely
47269 + ((ntohl(curr_ip) & ipp->netmask) ==
47270 + (ntohl(ipp->addr) & ipp->netmask)))
47271 + return match;
47272 + }
47273 + match = default_role;
47274 + }
47275 + } else if (match->allowed_ips == NULL) {
47276 + return match;
47277 + } else {
47278 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47279 + if (likely
47280 + ((ntohl(curr_ip) & ipp->netmask) ==
47281 + (ntohl(ipp->addr) & ipp->netmask)))
47282 + return match;
47283 + }
47284 + goto try_group;
47285 + }
47286 +
47287 + return match;
47288 +}
47289 +
47290 +struct acl_subject_label *
47291 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47292 + const struct acl_role_label *role)
47293 +{
47294 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47295 + struct acl_subject_label *match;
47296 +
47297 + match = role->subj_hash[index];
47298 +
47299 + while (match && (match->inode != ino || match->device != dev ||
47300 + (match->mode & GR_DELETED))) {
47301 + match = match->next;
47302 + }
47303 +
47304 + if (match && !(match->mode & GR_DELETED))
47305 + return match;
47306 + else
47307 + return NULL;
47308 +}
47309 +
47310 +struct acl_subject_label *
47311 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47312 + const struct acl_role_label *role)
47313 +{
47314 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47315 + struct acl_subject_label *match;
47316 +
47317 + match = role->subj_hash[index];
47318 +
47319 + while (match && (match->inode != ino || match->device != dev ||
47320 + !(match->mode & GR_DELETED))) {
47321 + match = match->next;
47322 + }
47323 +
47324 + if (match && (match->mode & GR_DELETED))
47325 + return match;
47326 + else
47327 + return NULL;
47328 +}
47329 +
47330 +static struct acl_object_label *
47331 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47332 + const struct acl_subject_label *subj)
47333 +{
47334 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47335 + struct acl_object_label *match;
47336 +
47337 + match = subj->obj_hash[index];
47338 +
47339 + while (match && (match->inode != ino || match->device != dev ||
47340 + (match->mode & GR_DELETED))) {
47341 + match = match->next;
47342 + }
47343 +
47344 + if (match && !(match->mode & GR_DELETED))
47345 + return match;
47346 + else
47347 + return NULL;
47348 +}
47349 +
47350 +static struct acl_object_label *
47351 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47352 + const struct acl_subject_label *subj)
47353 +{
47354 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47355 + struct acl_object_label *match;
47356 +
47357 + match = subj->obj_hash[index];
47358 +
47359 + while (match && (match->inode != ino || match->device != dev ||
47360 + !(match->mode & GR_DELETED))) {
47361 + match = match->next;
47362 + }
47363 +
47364 + if (match && (match->mode & GR_DELETED))
47365 + return match;
47366 +
47367 + match = subj->obj_hash[index];
47368 +
47369 + while (match && (match->inode != ino || match->device != dev ||
47370 + (match->mode & GR_DELETED))) {
47371 + match = match->next;
47372 + }
47373 +
47374 + if (match && !(match->mode & GR_DELETED))
47375 + return match;
47376 + else
47377 + return NULL;
47378 +}
47379 +
47380 +static struct name_entry *
47381 +lookup_name_entry(const char *name)
47382 +{
47383 + unsigned int len = strlen(name);
47384 + unsigned int key = full_name_hash(name, len);
47385 + unsigned int index = key % name_set.n_size;
47386 + struct name_entry *match;
47387 +
47388 + match = name_set.n_hash[index];
47389 +
47390 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47391 + match = match->next;
47392 +
47393 + return match;
47394 +}
47395 +
47396 +static struct name_entry *
47397 +lookup_name_entry_create(const char *name)
47398 +{
47399 + unsigned int len = strlen(name);
47400 + unsigned int key = full_name_hash(name, len);
47401 + unsigned int index = key % name_set.n_size;
47402 + struct name_entry *match;
47403 +
47404 + match = name_set.n_hash[index];
47405 +
47406 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47407 + !match->deleted))
47408 + match = match->next;
47409 +
47410 + if (match && match->deleted)
47411 + return match;
47412 +
47413 + match = name_set.n_hash[index];
47414 +
47415 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47416 + match->deleted))
47417 + match = match->next;
47418 +
47419 + if (match && !match->deleted)
47420 + return match;
47421 + else
47422 + return NULL;
47423 +}
47424 +
47425 +static struct inodev_entry *
47426 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47427 +{
47428 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47429 + struct inodev_entry *match;
47430 +
47431 + match = inodev_set.i_hash[index];
47432 +
47433 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47434 + match = match->next;
47435 +
47436 + return match;
47437 +}
47438 +
47439 +static void
47440 +insert_inodev_entry(struct inodev_entry *entry)
47441 +{
47442 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47443 + inodev_set.i_size);
47444 + struct inodev_entry **curr;
47445 +
47446 + entry->prev = NULL;
47447 +
47448 + curr = &inodev_set.i_hash[index];
47449 + if (*curr != NULL)
47450 + (*curr)->prev = entry;
47451 +
47452 + entry->next = *curr;
47453 + *curr = entry;
47454 +
47455 + return;
47456 +}
47457 +
47458 +static void
47459 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47460 +{
47461 + unsigned int index =
47462 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47463 + struct acl_role_label **curr;
47464 + struct acl_role_label *tmp;
47465 +
47466 + curr = &acl_role_set.r_hash[index];
47467 +
47468 + /* if role was already inserted due to domains and already has
47469 + a role in the same bucket as it attached, then we need to
47470 + combine these two buckets
47471 + */
47472 + if (role->next) {
47473 + tmp = role->next;
47474 + while (tmp->next)
47475 + tmp = tmp->next;
47476 + tmp->next = *curr;
47477 + } else
47478 + role->next = *curr;
47479 + *curr = role;
47480 +
47481 + return;
47482 +}
47483 +
47484 +static void
47485 +insert_acl_role_label(struct acl_role_label *role)
47486 +{
47487 + int i;
47488 +
47489 + if (role_list == NULL) {
47490 + role_list = role;
47491 + role->prev = NULL;
47492 + } else {
47493 + role->prev = role_list;
47494 + role_list = role;
47495 + }
47496 +
47497 + /* used for hash chains */
47498 + role->next = NULL;
47499 +
47500 + if (role->roletype & GR_ROLE_DOMAIN) {
47501 + for (i = 0; i < role->domain_child_num; i++)
47502 + __insert_acl_role_label(role, role->domain_children[i]);
47503 + } else
47504 + __insert_acl_role_label(role, role->uidgid);
47505 +}
47506 +
47507 +static int
47508 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47509 +{
47510 + struct name_entry **curr, *nentry;
47511 + struct inodev_entry *ientry;
47512 + unsigned int len = strlen(name);
47513 + unsigned int key = full_name_hash(name, len);
47514 + unsigned int index = key % name_set.n_size;
47515 +
47516 + curr = &name_set.n_hash[index];
47517 +
47518 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47519 + curr = &((*curr)->next);
47520 +
47521 + if (*curr != NULL)
47522 + return 1;
47523 +
47524 + nentry = acl_alloc(sizeof (struct name_entry));
47525 + if (nentry == NULL)
47526 + return 0;
47527 + ientry = acl_alloc(sizeof (struct inodev_entry));
47528 + if (ientry == NULL)
47529 + return 0;
47530 + ientry->nentry = nentry;
47531 +
47532 + nentry->key = key;
47533 + nentry->name = name;
47534 + nentry->inode = inode;
47535 + nentry->device = device;
47536 + nentry->len = len;
47537 + nentry->deleted = deleted;
47538 +
47539 + nentry->prev = NULL;
47540 + curr = &name_set.n_hash[index];
47541 + if (*curr != NULL)
47542 + (*curr)->prev = nentry;
47543 + nentry->next = *curr;
47544 + *curr = nentry;
47545 +
47546 + /* insert us into the table searchable by inode/dev */
47547 + insert_inodev_entry(ientry);
47548 +
47549 + return 1;
47550 +}
47551 +
47552 +static void
47553 +insert_acl_obj_label(struct acl_object_label *obj,
47554 + struct acl_subject_label *subj)
47555 +{
47556 + unsigned int index =
47557 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47558 + struct acl_object_label **curr;
47559 +
47560 +
47561 + obj->prev = NULL;
47562 +
47563 + curr = &subj->obj_hash[index];
47564 + if (*curr != NULL)
47565 + (*curr)->prev = obj;
47566 +
47567 + obj->next = *curr;
47568 + *curr = obj;
47569 +
47570 + return;
47571 +}
47572 +
47573 +static void
47574 +insert_acl_subj_label(struct acl_subject_label *obj,
47575 + struct acl_role_label *role)
47576 +{
47577 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47578 + struct acl_subject_label **curr;
47579 +
47580 + obj->prev = NULL;
47581 +
47582 + curr = &role->subj_hash[index];
47583 + if (*curr != NULL)
47584 + (*curr)->prev = obj;
47585 +
47586 + obj->next = *curr;
47587 + *curr = obj;
47588 +
47589 + return;
47590 +}
47591 +
47592 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47593 +
47594 +static void *
47595 +create_table(__u32 * len, int elementsize)
47596 +{
47597 + unsigned int table_sizes[] = {
47598 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47599 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47600 + 4194301, 8388593, 16777213, 33554393, 67108859
47601 + };
47602 + void *newtable = NULL;
47603 + unsigned int pwr = 0;
47604 +
47605 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47606 + table_sizes[pwr] <= *len)
47607 + pwr++;
47608 +
47609 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47610 + return newtable;
47611 +
47612 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47613 + newtable =
47614 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47615 + else
47616 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47617 +
47618 + *len = table_sizes[pwr];
47619 +
47620 + return newtable;
47621 +}
47622 +
47623 +static int
47624 +init_variables(const struct gr_arg *arg)
47625 +{
47626 + struct task_struct *reaper = &init_task;
47627 + unsigned int stacksize;
47628 +
47629 + subj_map_set.s_size = arg->role_db.num_subjects;
47630 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47631 + name_set.n_size = arg->role_db.num_objects;
47632 + inodev_set.i_size = arg->role_db.num_objects;
47633 +
47634 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47635 + !name_set.n_size || !inodev_set.i_size)
47636 + return 1;
47637 +
47638 + if (!gr_init_uidset())
47639 + return 1;
47640 +
47641 + /* set up the stack that holds allocation info */
47642 +
47643 + stacksize = arg->role_db.num_pointers + 5;
47644 +
47645 + if (!acl_alloc_stack_init(stacksize))
47646 + return 1;
47647 +
47648 + /* grab reference for the real root dentry and vfsmount */
47649 + get_fs_root(reaper->fs, &real_root);
47650 +
47651 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47652 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
47653 +#endif
47654 +
47655 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47656 + if (fakefs_obj_rw == NULL)
47657 + return 1;
47658 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47659 +
47660 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47661 + if (fakefs_obj_rwx == NULL)
47662 + return 1;
47663 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47664 +
47665 + subj_map_set.s_hash =
47666 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47667 + acl_role_set.r_hash =
47668 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47669 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47670 + inodev_set.i_hash =
47671 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47672 +
47673 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47674 + !name_set.n_hash || !inodev_set.i_hash)
47675 + return 1;
47676 +
47677 + memset(subj_map_set.s_hash, 0,
47678 + sizeof(struct subject_map *) * subj_map_set.s_size);
47679 + memset(acl_role_set.r_hash, 0,
47680 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47681 + memset(name_set.n_hash, 0,
47682 + sizeof (struct name_entry *) * name_set.n_size);
47683 + memset(inodev_set.i_hash, 0,
47684 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47685 +
47686 + return 0;
47687 +}
47688 +
47689 +/* free information not needed after startup
47690 + currently contains user->kernel pointer mappings for subjects
47691 +*/
47692 +
47693 +static void
47694 +free_init_variables(void)
47695 +{
47696 + __u32 i;
47697 +
47698 + if (subj_map_set.s_hash) {
47699 + for (i = 0; i < subj_map_set.s_size; i++) {
47700 + if (subj_map_set.s_hash[i]) {
47701 + kfree(subj_map_set.s_hash[i]);
47702 + subj_map_set.s_hash[i] = NULL;
47703 + }
47704 + }
47705 +
47706 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47707 + PAGE_SIZE)
47708 + kfree(subj_map_set.s_hash);
47709 + else
47710 + vfree(subj_map_set.s_hash);
47711 + }
47712 +
47713 + return;
47714 +}
47715 +
47716 +static void
47717 +free_variables(void)
47718 +{
47719 + struct acl_subject_label *s;
47720 + struct acl_role_label *r;
47721 + struct task_struct *task, *task2;
47722 + unsigned int x;
47723 +
47724 + gr_clear_learn_entries();
47725 +
47726 + read_lock(&tasklist_lock);
47727 + do_each_thread(task2, task) {
47728 + task->acl_sp_role = 0;
47729 + task->acl_role_id = 0;
47730 + task->acl = NULL;
47731 + task->role = NULL;
47732 + } while_each_thread(task2, task);
47733 + read_unlock(&tasklist_lock);
47734 +
47735 + /* release the reference to the real root dentry and vfsmount */
47736 + path_put(&real_root);
47737 +
47738 + /* free all object hash tables */
47739 +
47740 + FOR_EACH_ROLE_START(r)
47741 + if (r->subj_hash == NULL)
47742 + goto next_role;
47743 + FOR_EACH_SUBJECT_START(r, s, x)
47744 + if (s->obj_hash == NULL)
47745 + break;
47746 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47747 + kfree(s->obj_hash);
47748 + else
47749 + vfree(s->obj_hash);
47750 + FOR_EACH_SUBJECT_END(s, x)
47751 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47752 + if (s->obj_hash == NULL)
47753 + break;
47754 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47755 + kfree(s->obj_hash);
47756 + else
47757 + vfree(s->obj_hash);
47758 + FOR_EACH_NESTED_SUBJECT_END(s)
47759 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47760 + kfree(r->subj_hash);
47761 + else
47762 + vfree(r->subj_hash);
47763 + r->subj_hash = NULL;
47764 +next_role:
47765 + FOR_EACH_ROLE_END(r)
47766 +
47767 + acl_free_all();
47768 +
47769 + if (acl_role_set.r_hash) {
47770 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47771 + PAGE_SIZE)
47772 + kfree(acl_role_set.r_hash);
47773 + else
47774 + vfree(acl_role_set.r_hash);
47775 + }
47776 + if (name_set.n_hash) {
47777 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47778 + PAGE_SIZE)
47779 + kfree(name_set.n_hash);
47780 + else
47781 + vfree(name_set.n_hash);
47782 + }
47783 +
47784 + if (inodev_set.i_hash) {
47785 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47786 + PAGE_SIZE)
47787 + kfree(inodev_set.i_hash);
47788 + else
47789 + vfree(inodev_set.i_hash);
47790 + }
47791 +
47792 + gr_free_uidset();
47793 +
47794 + memset(&name_set, 0, sizeof (struct name_db));
47795 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47796 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47797 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47798 +
47799 + default_role = NULL;
47800 + role_list = NULL;
47801 +
47802 + return;
47803 +}
47804 +
47805 +static __u32
47806 +count_user_objs(struct acl_object_label *userp)
47807 +{
47808 + struct acl_object_label o_tmp;
47809 + __u32 num = 0;
47810 +
47811 + while (userp) {
47812 + if (copy_from_user(&o_tmp, userp,
47813 + sizeof (struct acl_object_label)))
47814 + break;
47815 +
47816 + userp = o_tmp.prev;
47817 + num++;
47818 + }
47819 +
47820 + return num;
47821 +}
47822 +
47823 +static struct acl_subject_label *
47824 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47825 +
47826 +static int
47827 +copy_user_glob(struct acl_object_label *obj)
47828 +{
47829 + struct acl_object_label *g_tmp, **guser;
47830 + unsigned int len;
47831 + char *tmp;
47832 +
47833 + if (obj->globbed == NULL)
47834 + return 0;
47835 +
47836 + guser = &obj->globbed;
47837 + while (*guser) {
47838 + g_tmp = (struct acl_object_label *)
47839 + acl_alloc(sizeof (struct acl_object_label));
47840 + if (g_tmp == NULL)
47841 + return -ENOMEM;
47842 +
47843 + if (copy_from_user(g_tmp, *guser,
47844 + sizeof (struct acl_object_label)))
47845 + return -EFAULT;
47846 +
47847 + len = strnlen_user(g_tmp->filename, PATH_MAX);
47848 +
47849 + if (!len || len >= PATH_MAX)
47850 + return -EINVAL;
47851 +
47852 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47853 + return -ENOMEM;
47854 +
47855 + if (copy_from_user(tmp, g_tmp->filename, len))
47856 + return -EFAULT;
47857 + tmp[len-1] = '\0';
47858 + g_tmp->filename = tmp;
47859 +
47860 + *guser = g_tmp;
47861 + guser = &(g_tmp->next);
47862 + }
47863 +
47864 + return 0;
47865 +}
47866 +
47867 +static int
47868 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47869 + struct acl_role_label *role)
47870 +{
47871 + struct acl_object_label *o_tmp;
47872 + unsigned int len;
47873 + int ret;
47874 + char *tmp;
47875 +
47876 + while (userp) {
47877 + if ((o_tmp = (struct acl_object_label *)
47878 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
47879 + return -ENOMEM;
47880 +
47881 + if (copy_from_user(o_tmp, userp,
47882 + sizeof (struct acl_object_label)))
47883 + return -EFAULT;
47884 +
47885 + userp = o_tmp->prev;
47886 +
47887 + len = strnlen_user(o_tmp->filename, PATH_MAX);
47888 +
47889 + if (!len || len >= PATH_MAX)
47890 + return -EINVAL;
47891 +
47892 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47893 + return -ENOMEM;
47894 +
47895 + if (copy_from_user(tmp, o_tmp->filename, len))
47896 + return -EFAULT;
47897 + tmp[len-1] = '\0';
47898 + o_tmp->filename = tmp;
47899 +
47900 + insert_acl_obj_label(o_tmp, subj);
47901 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47902 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47903 + return -ENOMEM;
47904 +
47905 + ret = copy_user_glob(o_tmp);
47906 + if (ret)
47907 + return ret;
47908 +
47909 + if (o_tmp->nested) {
47910 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47911 + if (IS_ERR(o_tmp->nested))
47912 + return PTR_ERR(o_tmp->nested);
47913 +
47914 + /* insert into nested subject list */
47915 + o_tmp->nested->next = role->hash->first;
47916 + role->hash->first = o_tmp->nested;
47917 + }
47918 + }
47919 +
47920 + return 0;
47921 +}
47922 +
47923 +static __u32
47924 +count_user_subjs(struct acl_subject_label *userp)
47925 +{
47926 + struct acl_subject_label s_tmp;
47927 + __u32 num = 0;
47928 +
47929 + while (userp) {
47930 + if (copy_from_user(&s_tmp, userp,
47931 + sizeof (struct acl_subject_label)))
47932 + break;
47933 +
47934 + userp = s_tmp.prev;
47935 + /* do not count nested subjects against this count, since
47936 + they are not included in the hash table, but are
47937 + attached to objects. We have already counted
47938 + the subjects in userspace for the allocation
47939 + stack
47940 + */
47941 + if (!(s_tmp.mode & GR_NESTED))
47942 + num++;
47943 + }
47944 +
47945 + return num;
47946 +}
47947 +
47948 +static int
47949 +copy_user_allowedips(struct acl_role_label *rolep)
47950 +{
47951 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47952 +
47953 + ruserip = rolep->allowed_ips;
47954 +
47955 + while (ruserip) {
47956 + rlast = rtmp;
47957 +
47958 + if ((rtmp = (struct role_allowed_ip *)
47959 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47960 + return -ENOMEM;
47961 +
47962 + if (copy_from_user(rtmp, ruserip,
47963 + sizeof (struct role_allowed_ip)))
47964 + return -EFAULT;
47965 +
47966 + ruserip = rtmp->prev;
47967 +
47968 + if (!rlast) {
47969 + rtmp->prev = NULL;
47970 + rolep->allowed_ips = rtmp;
47971 + } else {
47972 + rlast->next = rtmp;
47973 + rtmp->prev = rlast;
47974 + }
47975 +
47976 + if (!ruserip)
47977 + rtmp->next = NULL;
47978 + }
47979 +
47980 + return 0;
47981 +}
47982 +
47983 +static int
47984 +copy_user_transitions(struct acl_role_label *rolep)
47985 +{
47986 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
47987 +
47988 + unsigned int len;
47989 + char *tmp;
47990 +
47991 + rusertp = rolep->transitions;
47992 +
47993 + while (rusertp) {
47994 + rlast = rtmp;
47995 +
47996 + if ((rtmp = (struct role_transition *)
47997 + acl_alloc(sizeof (struct role_transition))) == NULL)
47998 + return -ENOMEM;
47999 +
48000 + if (copy_from_user(rtmp, rusertp,
48001 + sizeof (struct role_transition)))
48002 + return -EFAULT;
48003 +
48004 + rusertp = rtmp->prev;
48005 +
48006 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48007 +
48008 + if (!len || len >= GR_SPROLE_LEN)
48009 + return -EINVAL;
48010 +
48011 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48012 + return -ENOMEM;
48013 +
48014 + if (copy_from_user(tmp, rtmp->rolename, len))
48015 + return -EFAULT;
48016 + tmp[len-1] = '\0';
48017 + rtmp->rolename = tmp;
48018 +
48019 + if (!rlast) {
48020 + rtmp->prev = NULL;
48021 + rolep->transitions = rtmp;
48022 + } else {
48023 + rlast->next = rtmp;
48024 + rtmp->prev = rlast;
48025 + }
48026 +
48027 + if (!rusertp)
48028 + rtmp->next = NULL;
48029 + }
48030 +
48031 + return 0;
48032 +}
48033 +
48034 +static struct acl_subject_label *
48035 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48036 +{
48037 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48038 + unsigned int len;
48039 + char *tmp;
48040 + __u32 num_objs;
48041 + struct acl_ip_label **i_tmp, *i_utmp2;
48042 + struct gr_hash_struct ghash;
48043 + struct subject_map *subjmap;
48044 + unsigned int i_num;
48045 + int err;
48046 +
48047 + s_tmp = lookup_subject_map(userp);
48048 +
48049 + /* we've already copied this subject into the kernel, just return
48050 + the reference to it, and don't copy it over again
48051 + */
48052 + if (s_tmp)
48053 + return(s_tmp);
48054 +
48055 + if ((s_tmp = (struct acl_subject_label *)
48056 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48057 + return ERR_PTR(-ENOMEM);
48058 +
48059 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48060 + if (subjmap == NULL)
48061 + return ERR_PTR(-ENOMEM);
48062 +
48063 + subjmap->user = userp;
48064 + subjmap->kernel = s_tmp;
48065 + insert_subj_map_entry(subjmap);
48066 +
48067 + if (copy_from_user(s_tmp, userp,
48068 + sizeof (struct acl_subject_label)))
48069 + return ERR_PTR(-EFAULT);
48070 +
48071 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48072 +
48073 + if (!len || len >= PATH_MAX)
48074 + return ERR_PTR(-EINVAL);
48075 +
48076 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48077 + return ERR_PTR(-ENOMEM);
48078 +
48079 + if (copy_from_user(tmp, s_tmp->filename, len))
48080 + return ERR_PTR(-EFAULT);
48081 + tmp[len-1] = '\0';
48082 + s_tmp->filename = tmp;
48083 +
48084 + if (!strcmp(s_tmp->filename, "/"))
48085 + role->root_label = s_tmp;
48086 +
48087 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48088 + return ERR_PTR(-EFAULT);
48089 +
48090 + /* copy user and group transition tables */
48091 +
48092 + if (s_tmp->user_trans_num) {
48093 + uid_t *uidlist;
48094 +
48095 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48096 + if (uidlist == NULL)
48097 + return ERR_PTR(-ENOMEM);
48098 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48099 + return ERR_PTR(-EFAULT);
48100 +
48101 + s_tmp->user_transitions = uidlist;
48102 + }
48103 +
48104 + if (s_tmp->group_trans_num) {
48105 + gid_t *gidlist;
48106 +
48107 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48108 + if (gidlist == NULL)
48109 + return ERR_PTR(-ENOMEM);
48110 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48111 + return ERR_PTR(-EFAULT);
48112 +
48113 + s_tmp->group_transitions = gidlist;
48114 + }
48115 +
48116 + /* set up object hash table */
48117 + num_objs = count_user_objs(ghash.first);
48118 +
48119 + s_tmp->obj_hash_size = num_objs;
48120 + s_tmp->obj_hash =
48121 + (struct acl_object_label **)
48122 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48123 +
48124 + if (!s_tmp->obj_hash)
48125 + return ERR_PTR(-ENOMEM);
48126 +
48127 + memset(s_tmp->obj_hash, 0,
48128 + s_tmp->obj_hash_size *
48129 + sizeof (struct acl_object_label *));
48130 +
48131 + /* add in objects */
48132 + err = copy_user_objs(ghash.first, s_tmp, role);
48133 +
48134 + if (err)
48135 + return ERR_PTR(err);
48136 +
48137 + /* set pointer for parent subject */
48138 + if (s_tmp->parent_subject) {
48139 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48140 +
48141 + if (IS_ERR(s_tmp2))
48142 + return s_tmp2;
48143 +
48144 + s_tmp->parent_subject = s_tmp2;
48145 + }
48146 +
48147 + /* add in ip acls */
48148 +
48149 + if (!s_tmp->ip_num) {
48150 + s_tmp->ips = NULL;
48151 + goto insert;
48152 + }
48153 +
48154 + i_tmp =
48155 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48156 + sizeof (struct acl_ip_label *));
48157 +
48158 + if (!i_tmp)
48159 + return ERR_PTR(-ENOMEM);
48160 +
48161 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48162 + *(i_tmp + i_num) =
48163 + (struct acl_ip_label *)
48164 + acl_alloc(sizeof (struct acl_ip_label));
48165 + if (!*(i_tmp + i_num))
48166 + return ERR_PTR(-ENOMEM);
48167 +
48168 + if (copy_from_user
48169 + (&i_utmp2, s_tmp->ips + i_num,
48170 + sizeof (struct acl_ip_label *)))
48171 + return ERR_PTR(-EFAULT);
48172 +
48173 + if (copy_from_user
48174 + (*(i_tmp + i_num), i_utmp2,
48175 + sizeof (struct acl_ip_label)))
48176 + return ERR_PTR(-EFAULT);
48177 +
48178 + if ((*(i_tmp + i_num))->iface == NULL)
48179 + continue;
48180 +
48181 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48182 + if (!len || len >= IFNAMSIZ)
48183 + return ERR_PTR(-EINVAL);
48184 + tmp = acl_alloc(len);
48185 + if (tmp == NULL)
48186 + return ERR_PTR(-ENOMEM);
48187 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48188 + return ERR_PTR(-EFAULT);
48189 + (*(i_tmp + i_num))->iface = tmp;
48190 + }
48191 +
48192 + s_tmp->ips = i_tmp;
48193 +
48194 +insert:
48195 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48196 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48197 + return ERR_PTR(-ENOMEM);
48198 +
48199 + return s_tmp;
48200 +}
48201 +
48202 +static int
48203 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48204 +{
48205 + struct acl_subject_label s_pre;
48206 + struct acl_subject_label * ret;
48207 + int err;
48208 +
48209 + while (userp) {
48210 + if (copy_from_user(&s_pre, userp,
48211 + sizeof (struct acl_subject_label)))
48212 + return -EFAULT;
48213 +
48214 + /* do not add nested subjects here, add
48215 + while parsing objects
48216 + */
48217 +
48218 + if (s_pre.mode & GR_NESTED) {
48219 + userp = s_pre.prev;
48220 + continue;
48221 + }
48222 +
48223 + ret = do_copy_user_subj(userp, role);
48224 +
48225 + err = PTR_ERR(ret);
48226 + if (IS_ERR(ret))
48227 + return err;
48228 +
48229 + insert_acl_subj_label(ret, role);
48230 +
48231 + userp = s_pre.prev;
48232 + }
48233 +
48234 + return 0;
48235 +}
48236 +
48237 +static int
48238 +copy_user_acl(struct gr_arg *arg)
48239 +{
48240 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48241 + struct sprole_pw *sptmp;
48242 + struct gr_hash_struct *ghash;
48243 + uid_t *domainlist;
48244 + unsigned int r_num;
48245 + unsigned int len;
48246 + char *tmp;
48247 + int err = 0;
48248 + __u16 i;
48249 + __u32 num_subjs;
48250 +
48251 + /* we need a default and kernel role */
48252 + if (arg->role_db.num_roles < 2)
48253 + return -EINVAL;
48254 +
48255 + /* copy special role authentication info from userspace */
48256 +
48257 + num_sprole_pws = arg->num_sprole_pws;
48258 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48259 +
48260 + if (!acl_special_roles) {
48261 + err = -ENOMEM;
48262 + goto cleanup;
48263 + }
48264 +
48265 + for (i = 0; i < num_sprole_pws; i++) {
48266 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48267 + if (!sptmp) {
48268 + err = -ENOMEM;
48269 + goto cleanup;
48270 + }
48271 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48272 + sizeof (struct sprole_pw))) {
48273 + err = -EFAULT;
48274 + goto cleanup;
48275 + }
48276 +
48277 + len =
48278 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48279 +
48280 + if (!len || len >= GR_SPROLE_LEN) {
48281 + err = -EINVAL;
48282 + goto cleanup;
48283 + }
48284 +
48285 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48286 + err = -ENOMEM;
48287 + goto cleanup;
48288 + }
48289 +
48290 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48291 + err = -EFAULT;
48292 + goto cleanup;
48293 + }
48294 + tmp[len-1] = '\0';
48295 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48296 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48297 +#endif
48298 + sptmp->rolename = tmp;
48299 + acl_special_roles[i] = sptmp;
48300 + }
48301 +
48302 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48303 +
48304 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48305 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48306 +
48307 + if (!r_tmp) {
48308 + err = -ENOMEM;
48309 + goto cleanup;
48310 + }
48311 +
48312 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48313 + sizeof (struct acl_role_label *))) {
48314 + err = -EFAULT;
48315 + goto cleanup;
48316 + }
48317 +
48318 + if (copy_from_user(r_tmp, r_utmp2,
48319 + sizeof (struct acl_role_label))) {
48320 + err = -EFAULT;
48321 + goto cleanup;
48322 + }
48323 +
48324 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48325 +
48326 + if (!len || len >= PATH_MAX) {
48327 + err = -EINVAL;
48328 + goto cleanup;
48329 + }
48330 +
48331 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48332 + err = -ENOMEM;
48333 + goto cleanup;
48334 + }
48335 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48336 + err = -EFAULT;
48337 + goto cleanup;
48338 + }
48339 + tmp[len-1] = '\0';
48340 + r_tmp->rolename = tmp;
48341 +
48342 + if (!strcmp(r_tmp->rolename, "default")
48343 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48344 + default_role = r_tmp;
48345 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48346 + kernel_role = r_tmp;
48347 + }
48348 +
48349 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48350 + err = -ENOMEM;
48351 + goto cleanup;
48352 + }
48353 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48354 + err = -EFAULT;
48355 + goto cleanup;
48356 + }
48357 +
48358 + r_tmp->hash = ghash;
48359 +
48360 + num_subjs = count_user_subjs(r_tmp->hash->first);
48361 +
48362 + r_tmp->subj_hash_size = num_subjs;
48363 + r_tmp->subj_hash =
48364 + (struct acl_subject_label **)
48365 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48366 +
48367 + if (!r_tmp->subj_hash) {
48368 + err = -ENOMEM;
48369 + goto cleanup;
48370 + }
48371 +
48372 + err = copy_user_allowedips(r_tmp);
48373 + if (err)
48374 + goto cleanup;
48375 +
48376 + /* copy domain info */
48377 + if (r_tmp->domain_children != NULL) {
48378 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48379 + if (domainlist == NULL) {
48380 + err = -ENOMEM;
48381 + goto cleanup;
48382 + }
48383 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48384 + err = -EFAULT;
48385 + goto cleanup;
48386 + }
48387 + r_tmp->domain_children = domainlist;
48388 + }
48389 +
48390 + err = copy_user_transitions(r_tmp);
48391 + if (err)
48392 + goto cleanup;
48393 +
48394 + memset(r_tmp->subj_hash, 0,
48395 + r_tmp->subj_hash_size *
48396 + sizeof (struct acl_subject_label *));
48397 +
48398 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48399 +
48400 + if (err)
48401 + goto cleanup;
48402 +
48403 + /* set nested subject list to null */
48404 + r_tmp->hash->first = NULL;
48405 +
48406 + insert_acl_role_label(r_tmp);
48407 + }
48408 +
48409 + goto return_err;
48410 + cleanup:
48411 + free_variables();
48412 + return_err:
48413 + return err;
48414 +
48415 +}
48416 +
48417 +static int
48418 +gracl_init(struct gr_arg *args)
48419 +{
48420 + int error = 0;
48421 +
48422 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48423 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48424 +
48425 + if (init_variables(args)) {
48426 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48427 + error = -ENOMEM;
48428 + free_variables();
48429 + goto out;
48430 + }
48431 +
48432 + error = copy_user_acl(args);
48433 + free_init_variables();
48434 + if (error) {
48435 + free_variables();
48436 + goto out;
48437 + }
48438 +
48439 + if ((error = gr_set_acls(0))) {
48440 + free_variables();
48441 + goto out;
48442 + }
48443 +
48444 + pax_open_kernel();
48445 + gr_status |= GR_READY;
48446 + pax_close_kernel();
48447 +
48448 + out:
48449 + return error;
48450 +}
48451 +
48452 +/* derived from glibc fnmatch(); 0: match, 1: no match */
48453 +
48454 +static int
48455 +glob_match(const char *p, const char *n)
48456 +{
48457 + char c;
48458 +
48459 + while ((c = *p++) != '\0') {
48460 + switch (c) {
48461 + case '?':
48462 + if (*n == '\0')
48463 + return 1;
48464 + else if (*n == '/')
48465 + return 1;
48466 + break;
48467 + case '\\':
48468 + if (*n != c)
48469 + return 1;
48470 + break;
48471 + case '*':
48472 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48473 + if (*n == '/')
48474 + return 1;
48475 + else if (c == '?') {
48476 + if (*n == '\0')
48477 + return 1;
48478 + else
48479 + ++n;
48480 + }
48481 + }
48482 + if (c == '\0') {
48483 + return 0;
48484 + } else {
48485 + const char *endp;
48486 +
48487 + if ((endp = strchr(n, '/')) == NULL)
48488 + endp = n + strlen(n);
48489 +
48490 + if (c == '[') {
48491 + for (--p; n < endp; ++n)
48492 + if (!glob_match(p, n))
48493 + return 0;
48494 + } else if (c == '/') {
48495 + while (*n != '\0' && *n != '/')
48496 + ++n;
48497 + if (*n == '/' && !glob_match(p, n + 1))
48498 + return 0;
48499 + } else {
48500 + for (--p; n < endp; ++n)
48501 + if (*n == c && !glob_match(p, n))
48502 + return 0;
48503 + }
48504 +
48505 + return 1;
48506 + }
48507 + case '[':
48508 + {
48509 + int not;
48510 + char cold;
48511 +
48512 + if (*n == '\0' || *n == '/')
48513 + return 1;
48514 +
48515 + not = (*p == '!' || *p == '^');
48516 + if (not)
48517 + ++p;
48518 +
48519 + c = *p++;
48520 + for (;;) {
48521 + unsigned char fn = (unsigned char)*n;
48522 +
48523 + if (c == '\0')
48524 + return 1;
48525 + else {
48526 + if (c == fn)
48527 + goto matched;
48528 + cold = c;
48529 + c = *p++;
48530 +
48531 + if (c == '-' && *p != ']') {
48532 + unsigned char cend = *p++;
48533 +
48534 + if (cend == '\0')
48535 + return 1;
48536 +
48537 + if (cold <= fn && fn <= cend)
48538 + goto matched;
48539 +
48540 + c = *p++;
48541 + }
48542 + }
48543 +
48544 + if (c == ']')
48545 + break;
48546 + }
48547 + if (!not)
48548 + return 1;
48549 + break;
48550 + matched:
48551 + while (c != ']') {
48552 + if (c == '\0')
48553 + return 1;
48554 +
48555 + c = *p++;
48556 + }
48557 + if (not)
48558 + return 1;
48559 + }
48560 + break;
48561 + default:
48562 + if (c != *n)
48563 + return 1;
48564 + }
48565 +
48566 + ++n;
48567 + }
48568 +
48569 + if (*n == '\0')
48570 + return 0;
48571 +
48572 + if (*n == '/')
48573 + return 0;
48574 +
48575 + return 1;
48576 +}
48577 +
48578 +static struct acl_object_label *
48579 +chk_glob_label(struct acl_object_label *globbed,
48580 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48581 +{
48582 + struct acl_object_label *tmp;
48583 +
48584 + if (*path == NULL)
48585 + *path = gr_to_filename_nolock(dentry, mnt);
48586 +
48587 + tmp = globbed;
48588 +
48589 + while (tmp) {
48590 + if (!glob_match(tmp->filename, *path))
48591 + return tmp;
48592 + tmp = tmp->next;
48593 + }
48594 +
48595 + return NULL;
48596 +}
48597 +
48598 +static struct acl_object_label *
48599 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48600 + const ino_t curr_ino, const dev_t curr_dev,
48601 + const struct acl_subject_label *subj, char **path, const int checkglob)
48602 +{
48603 + struct acl_subject_label *tmpsubj;
48604 + struct acl_object_label *retval;
48605 + struct acl_object_label *retval2;
48606 +
48607 + tmpsubj = (struct acl_subject_label *) subj;
48608 + read_lock(&gr_inode_lock);
48609 + do {
48610 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48611 + if (retval) {
48612 + if (checkglob && retval->globbed) {
48613 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48614 + (struct vfsmount *)orig_mnt, path);
48615 + if (retval2)
48616 + retval = retval2;
48617 + }
48618 + break;
48619 + }
48620 + } while ((tmpsubj = tmpsubj->parent_subject));
48621 + read_unlock(&gr_inode_lock);
48622 +
48623 + return retval;
48624 +}
48625 +
48626 +static __inline__ struct acl_object_label *
48627 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48628 + struct dentry *curr_dentry,
48629 + const struct acl_subject_label *subj, char **path, const int checkglob)
48630 +{
48631 + int newglob = checkglob;
48632 + ino_t inode;
48633 + dev_t device;
48634 +
48635 +	/* If we aren't yet checking a subdirectory of the original path, don't do glob checking,
48636 +	   as we don't want a / * rule to match instead of the / object.
48637 +	   Don't do this for create lookups that call this function, though, since they look up
48638 +	   on the parent and thus need globbing checks on all paths.
48639 +	*/
48640 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48641 + newglob = GR_NO_GLOB;
48642 +
48643 + spin_lock(&curr_dentry->d_lock);
48644 + inode = curr_dentry->d_inode->i_ino;
48645 + device = __get_dev(curr_dentry);
48646 + spin_unlock(&curr_dentry->d_lock);
48647 +
48648 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
48649 +}
48650 +
48651 +static struct acl_object_label *
48652 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48653 + const struct acl_subject_label *subj, char *path, const int checkglob)
48654 +{
48655 + struct dentry *dentry = (struct dentry *) l_dentry;
48656 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48657 + struct acl_object_label *retval;
48658 + struct dentry *parent;
48659 +
48660 + write_seqlock(&rename_lock);
48661 + br_read_lock(vfsmount_lock);
48662 +
48663 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48664 +#ifdef CONFIG_NET
48665 + mnt == sock_mnt ||
48666 +#endif
48667 +#ifdef CONFIG_HUGETLBFS
48668 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48669 +#endif
48670 + /* ignore Eric Biederman */
48671 + IS_PRIVATE(l_dentry->d_inode))) {
48672 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48673 + goto out;
48674 + }
48675 +
48676 + for (;;) {
48677 + if (dentry == real_root.dentry && mnt == real_root.mnt)
48678 + break;
48679 +
48680 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48681 + if (mnt->mnt_parent == mnt)
48682 + break;
48683 +
48684 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48685 + if (retval != NULL)
48686 + goto out;
48687 +
48688 + dentry = mnt->mnt_mountpoint;
48689 + mnt = mnt->mnt_parent;
48690 + continue;
48691 + }
48692 +
48693 + parent = dentry->d_parent;
48694 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48695 + if (retval != NULL)
48696 + goto out;
48697 +
48698 + dentry = parent;
48699 + }
48700 +
48701 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48702 +
48703 + /* real_root is pinned so we don't have to hold a reference */
48704 + if (retval == NULL)
48705 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
48706 +out:
48707 + br_read_unlock(vfsmount_lock);
48708 + write_sequnlock(&rename_lock);
48709 +
48710 + BUG_ON(retval == NULL);
48711 +
48712 + return retval;
48713 +}
48714 +
48715 +static __inline__ struct acl_object_label *
48716 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48717 + const struct acl_subject_label *subj)
48718 +{
48719 + char *path = NULL;
48720 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48721 +}
48722 +
48723 +static __inline__ struct acl_object_label *
48724 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48725 + const struct acl_subject_label *subj)
48726 +{
48727 + char *path = NULL;
48728 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48729 +}
48730 +
48731 +static __inline__ struct acl_object_label *
48732 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48733 + const struct acl_subject_label *subj, char *path)
48734 +{
48735 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48736 +}
48737 +
48738 +static struct acl_subject_label *
48739 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48740 + const struct acl_role_label *role)
48741 +{
48742 + struct dentry *dentry = (struct dentry *) l_dentry;
48743 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48744 + struct acl_subject_label *retval;
48745 + struct dentry *parent;
48746 +
48747 + write_seqlock(&rename_lock);
48748 + br_read_lock(vfsmount_lock);
48749 +
48750 + for (;;) {
48751 + if (dentry == real_root.dentry && mnt == real_root.mnt)
48752 + break;
48753 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48754 + if (mnt->mnt_parent == mnt)
48755 + break;
48756 +
48757 + spin_lock(&dentry->d_lock);
48758 + read_lock(&gr_inode_lock);
48759 + retval =
48760 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48761 + __get_dev(dentry), role);
48762 + read_unlock(&gr_inode_lock);
48763 + spin_unlock(&dentry->d_lock);
48764 + if (retval != NULL)
48765 + goto out;
48766 +
48767 + dentry = mnt->mnt_mountpoint;
48768 + mnt = mnt->mnt_parent;
48769 + continue;
48770 + }
48771 +
48772 + spin_lock(&dentry->d_lock);
48773 + read_lock(&gr_inode_lock);
48774 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48775 + __get_dev(dentry), role);
48776 + read_unlock(&gr_inode_lock);
48777 + parent = dentry->d_parent;
48778 + spin_unlock(&dentry->d_lock);
48779 +
48780 + if (retval != NULL)
48781 + goto out;
48782 +
48783 + dentry = parent;
48784 + }
48785 +
48786 + spin_lock(&dentry->d_lock);
48787 + read_lock(&gr_inode_lock);
48788 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48789 + __get_dev(dentry), role);
48790 + read_unlock(&gr_inode_lock);
48791 + spin_unlock(&dentry->d_lock);
48792 +
48793 + if (unlikely(retval == NULL)) {
48794 + /* real_root is pinned, we don't need to hold a reference */
48795 + read_lock(&gr_inode_lock);
48796 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
48797 + __get_dev(real_root.dentry), role);
48798 + read_unlock(&gr_inode_lock);
48799 + }
48800 +out:
48801 + br_read_unlock(vfsmount_lock);
48802 + write_sequnlock(&rename_lock);
48803 +
48804 + BUG_ON(retval == NULL);
48805 +
48806 + return retval;
48807 +}
48808 +
48809 +static void
48810 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48811 +{
48812 + struct task_struct *task = current;
48813 + const struct cred *cred = current_cred();
48814 +
48815 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48816 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48817 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48818 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48819 +
48820 + return;
48821 +}
48822 +
48823 +static void
48824 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48825 +{
48826 + struct task_struct *task = current;
48827 + const struct cred *cred = current_cred();
48828 +
48829 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48830 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48831 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48832 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48833 +
48834 + return;
48835 +}
48836 +
48837 +static void
48838 +gr_log_learn_id_change(const char type, const unsigned int real,
48839 + const unsigned int effective, const unsigned int fs)
48840 +{
48841 + struct task_struct *task = current;
48842 + const struct cred *cred = current_cred();
48843 +
48844 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48845 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48846 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48847 + type, real, effective, fs, &task->signal->saved_ip);
48848 +
48849 + return;
48850 +}
48851 +
48852 +__u32
48853 +gr_search_file(const struct dentry * dentry, const __u32 mode,
48854 + const struct vfsmount * mnt)
48855 +{
48856 + __u32 retval = mode;
48857 + struct acl_subject_label *curracl;
48858 + struct acl_object_label *currobj;
48859 +
48860 + if (unlikely(!(gr_status & GR_READY)))
48861 + return (mode & ~GR_AUDITS);
48862 +
48863 + curracl = current->acl;
48864 +
48865 + currobj = chk_obj_label(dentry, mnt, curracl);
48866 + retval = currobj->mode & mode;
48867 +
48868 + /* if we're opening a specified transfer file for writing
48869 + (e.g. /dev/initctl), then transfer our role to init
48870 + */
48871 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48872 + current->role->roletype & GR_ROLE_PERSIST)) {
48873 + struct task_struct *task = init_pid_ns.child_reaper;
48874 +
48875 + if (task->role != current->role) {
48876 + task->acl_sp_role = 0;
48877 + task->acl_role_id = current->acl_role_id;
48878 + task->role = current->role;
48879 + rcu_read_lock();
48880 + read_lock(&grsec_exec_file_lock);
48881 + gr_apply_subject_to_task(task);
48882 + read_unlock(&grsec_exec_file_lock);
48883 + rcu_read_unlock();
48884 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48885 + }
48886 + }
48887 +
48888 + if (unlikely
48889 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48890 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48891 + __u32 new_mode = mode;
48892 +
48893 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48894 +
48895 + retval = new_mode;
48896 +
48897 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48898 + new_mode |= GR_INHERIT;
48899 +
48900 + if (!(mode & GR_NOLEARN))
48901 + gr_log_learn(dentry, mnt, new_mode);
48902 + }
48903 +
48904 + return retval;
48905 +}
48906 +
48907 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
48908 + const struct dentry *parent,
48909 + const struct vfsmount *mnt)
48910 +{
48911 + struct name_entry *match;
48912 + struct acl_object_label *matchpo;
48913 + struct acl_subject_label *curracl;
48914 + char *path;
48915 +
48916 + if (unlikely(!(gr_status & GR_READY)))
48917 + return NULL;
48918 +
48919 + preempt_disable();
48920 + path = gr_to_filename_rbac(new_dentry, mnt);
48921 + match = lookup_name_entry_create(path);
48922 +
48923 + curracl = current->acl;
48924 +
48925 + if (match) {
48926 + read_lock(&gr_inode_lock);
48927 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48928 + read_unlock(&gr_inode_lock);
48929 +
48930 + if (matchpo) {
48931 + preempt_enable();
48932 + return matchpo;
48933 + }
48934 + }
48935 +
48936 + // lookup parent
48937 +
48938 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48939 +
48940 + preempt_enable();
48941 + return matchpo;
48942 +}
48943 +
48944 +__u32
48945 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48946 + const struct vfsmount * mnt, const __u32 mode)
48947 +{
48948 + struct acl_object_label *matchpo;
48949 + __u32 retval;
48950 +
48951 + if (unlikely(!(gr_status & GR_READY)))
48952 + return (mode & ~GR_AUDITS);
48953 +
48954 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
48955 +
48956 + retval = matchpo->mode & mode;
48957 +
48958 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48959 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48960 + __u32 new_mode = mode;
48961 +
48962 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48963 +
48964 + gr_log_learn(new_dentry, mnt, new_mode);
48965 + return new_mode;
48966 + }
48967 +
48968 + return retval;
48969 +}
48970 +
48971 +__u32
48972 +gr_check_link(const struct dentry * new_dentry,
48973 + const struct dentry * parent_dentry,
48974 + const struct vfsmount * parent_mnt,
48975 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48976 +{
48977 + struct acl_object_label *obj;
48978 + __u32 oldmode, newmode;
48979 + __u32 needmode;
48980 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
48981 + GR_DELETE | GR_INHERIT;
48982 +
48983 + if (unlikely(!(gr_status & GR_READY)))
48984 + return (GR_CREATE | GR_LINK);
48985 +
48986 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48987 + oldmode = obj->mode;
48988 +
48989 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
48990 + newmode = obj->mode;
48991 +
48992 + needmode = newmode & checkmodes;
48993 +
48994 + // old name for hardlink must have at least the permissions of the new name
48995 + if ((oldmode & needmode) != needmode)
48996 + goto bad;
48997 +
48998 + // if old name had restrictions/auditing, make sure the new name does as well
48999 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49000 +
49001 + // don't allow hardlinking of suid/sgid files without permission
49002 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49003 + needmode |= GR_SETID;
49004 +
49005 + if ((newmode & needmode) != needmode)
49006 + goto bad;
49007 +
49008 + // enforce minimum permissions
49009 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49010 + return newmode;
49011 +bad:
49012 + needmode = oldmode;
49013 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49014 + needmode |= GR_SETID;
49015 +
49016 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49017 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49018 + return (GR_CREATE | GR_LINK);
49019 + } else if (newmode & GR_SUPPRESS)
49020 + return GR_SUPPRESS;
49021 + else
49022 + return 0;
49023 +}
49024 +
49025 +int
49026 +gr_check_hidden_task(const struct task_struct *task)
49027 +{
49028 + if (unlikely(!(gr_status & GR_READY)))
49029 + return 0;
49030 +
49031 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49032 + return 1;
49033 +
49034 + return 0;
49035 +}
49036 +
49037 +int
49038 +gr_check_protected_task(const struct task_struct *task)
49039 +{
49040 + if (unlikely(!(gr_status & GR_READY) || !task))
49041 + return 0;
49042 +
49043 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49044 + task->acl != current->acl)
49045 + return 1;
49046 +
49047 + return 0;
49048 +}
49049 +
49050 +int
49051 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49052 +{
49053 + struct task_struct *p;
49054 + int ret = 0;
49055 +
49056 + if (unlikely(!(gr_status & GR_READY) || !pid))
49057 + return ret;
49058 +
49059 + read_lock(&tasklist_lock);
49060 + do_each_pid_task(pid, type, p) {
49061 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49062 + p->acl != current->acl) {
49063 + ret = 1;
49064 + goto out;
49065 + }
49066 + } while_each_pid_task(pid, type, p);
49067 +out:
49068 + read_unlock(&tasklist_lock);
49069 +
49070 + return ret;
49071 +}
49072 +
49073 +void
49074 +gr_copy_label(struct task_struct *tsk)
49075 +{
49076 + tsk->signal->used_accept = 0;
49077 + tsk->acl_sp_role = 0;
49078 + tsk->acl_role_id = current->acl_role_id;
49079 + tsk->acl = current->acl;
49080 + tsk->role = current->role;
49081 + tsk->signal->curr_ip = current->signal->curr_ip;
49082 + tsk->signal->saved_ip = current->signal->saved_ip;
49083 + if (current->exec_file)
49084 + get_file(current->exec_file);
49085 + tsk->exec_file = current->exec_file;
49086 + tsk->is_writable = current->is_writable;
49087 + if (unlikely(current->signal->used_accept)) {
49088 + current->signal->curr_ip = 0;
49089 + current->signal->saved_ip = 0;
49090 + }
49091 +
49092 + return;
49093 +}
49094 +
49095 +static void
49096 +gr_set_proc_res(struct task_struct *task)
49097 +{
49098 + struct acl_subject_label *proc;
49099 + unsigned short i;
49100 +
49101 + proc = task->acl;
49102 +
49103 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49104 + return;
49105 +
49106 + for (i = 0; i < RLIM_NLIMITS; i++) {
49107 + if (!(proc->resmask & (1 << i)))
49108 + continue;
49109 +
49110 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49111 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49112 + }
49113 +
49114 + return;
49115 +}
49116 +
49117 +extern int __gr_process_user_ban(struct user_struct *user);
49118 +
49119 +int
49120 +gr_check_user_change(int real, int effective, int fs)
49121 +{
49122 + unsigned int i;
49123 + __u16 num;
49124 + uid_t *uidlist;
49125 + int curuid;
49126 + int realok = 0;
49127 + int effectiveok = 0;
49128 + int fsok = 0;
49129 +
49130 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49131 + struct user_struct *user;
49132 +
49133 + if (real == -1)
49134 + goto skipit;
49135 +
49136 + user = find_user(real);
49137 + if (user == NULL)
49138 + goto skipit;
49139 +
49140 + if (__gr_process_user_ban(user)) {
49141 + /* for find_user */
49142 + free_uid(user);
49143 + return 1;
49144 + }
49145 +
49146 + /* for find_user */
49147 + free_uid(user);
49148 +
49149 +skipit:
49150 +#endif
49151 +
49152 + if (unlikely(!(gr_status & GR_READY)))
49153 + return 0;
49154 +
49155 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49156 + gr_log_learn_id_change('u', real, effective, fs);
49157 +
49158 + num = current->acl->user_trans_num;
49159 + uidlist = current->acl->user_transitions;
49160 +
49161 + if (uidlist == NULL)
49162 + return 0;
49163 +
49164 + if (real == -1)
49165 + realok = 1;
49166 + if (effective == -1)
49167 + effectiveok = 1;
49168 + if (fs == -1)
49169 + fsok = 1;
49170 +
49171 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49172 + for (i = 0; i < num; i++) {
49173 + curuid = (int)uidlist[i];
49174 + if (real == curuid)
49175 + realok = 1;
49176 + if (effective == curuid)
49177 + effectiveok = 1;
49178 + if (fs == curuid)
49179 + fsok = 1;
49180 + }
49181 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49182 + for (i = 0; i < num; i++) {
49183 + curuid = (int)uidlist[i];
49184 + if (real == curuid)
49185 + break;
49186 + if (effective == curuid)
49187 + break;
49188 + if (fs == curuid)
49189 + break;
49190 + }
49191 + /* not in deny list */
49192 + if (i == num) {
49193 + realok = 1;
49194 + effectiveok = 1;
49195 + fsok = 1;
49196 + }
49197 + }
49198 +
49199 + if (realok && effectiveok && fsok)
49200 + return 0;
49201 + else {
49202 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49203 + return 1;
49204 + }
49205 +}
49206 +
49207 +int
49208 +gr_check_group_change(int real, int effective, int fs)
49209 +{
49210 + unsigned int i;
49211 + __u16 num;
49212 + gid_t *gidlist;
49213 + int curgid;
49214 + int realok = 0;
49215 + int effectiveok = 0;
49216 + int fsok = 0;
49217 +
49218 + if (unlikely(!(gr_status & GR_READY)))
49219 + return 0;
49220 +
49221 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49222 + gr_log_learn_id_change('g', real, effective, fs);
49223 +
49224 + num = current->acl->group_trans_num;
49225 + gidlist = current->acl->group_transitions;
49226 +
49227 + if (gidlist == NULL)
49228 + return 0;
49229 +
49230 + if (real == -1)
49231 + realok = 1;
49232 + if (effective == -1)
49233 + effectiveok = 1;
49234 + if (fs == -1)
49235 + fsok = 1;
49236 +
49237 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49238 + for (i = 0; i < num; i++) {
49239 + curgid = (int)gidlist[i];
49240 + if (real == curgid)
49241 + realok = 1;
49242 + if (effective == curgid)
49243 + effectiveok = 1;
49244 + if (fs == curgid)
49245 + fsok = 1;
49246 + }
49247 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49248 + for (i = 0; i < num; i++) {
49249 + curgid = (int)gidlist[i];
49250 + if (real == curgid)
49251 + break;
49252 + if (effective == curgid)
49253 + break;
49254 + if (fs == curgid)
49255 + break;
49256 + }
49257 + /* not in deny list */
49258 + if (i == num) {
49259 + realok = 1;
49260 + effectiveok = 1;
49261 + fsok = 1;
49262 + }
49263 + }
49264 +
49265 + if (realok && effectiveok && fsok)
49266 + return 0;
49267 + else {
49268 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49269 + return 1;
49270 + }
49271 +}
49272 +
49273 +void
49274 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49275 +{
49276 + struct acl_role_label *role = task->role;
49277 + struct acl_subject_label *subj = NULL;
49278 + struct acl_object_label *obj;
49279 + struct file *filp;
49280 +
49281 + if (unlikely(!(gr_status & GR_READY)))
49282 + return;
49283 +
49284 + filp = task->exec_file;
49285 +
49286 + /* kernel process, we'll give them the kernel role */
49287 + if (unlikely(!filp)) {
49288 + task->role = kernel_role;
49289 + task->acl = kernel_role->root_label;
49290 + return;
49291 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49292 + role = lookup_acl_role_label(task, uid, gid);
49293 +
49294 +	/* perform subject lookup in the possibly new role;
49295 +	   we can use this result below in the case where role == task->role
49296 +	 */
49297 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49298 +
49299 +	/* if we changed uid/gid but ended up with the same role
49300 +	   and are using inheritance, don't lose the inherited subject;
49301 +	   if the current subject is other than what a normal lookup
49302 +	   would result in, we arrived via inheritance, so don't
49303 +	   lose the subject
49304 +	*/
49305 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49306 + (subj == task->acl)))
49307 + task->acl = subj;
49308 +
49309 + task->role = role;
49310 +
49311 + task->is_writable = 0;
49312 +
49313 + /* ignore additional mmap checks for processes that are writable
49314 + by the default ACL */
49315 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49316 + if (unlikely(obj->mode & GR_WRITE))
49317 + task->is_writable = 1;
49318 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49319 + if (unlikely(obj->mode & GR_WRITE))
49320 + task->is_writable = 1;
49321 +
49322 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49323 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49324 +#endif
49325 +
49326 + gr_set_proc_res(task);
49327 +
49328 + return;
49329 +}
49330 +
49331 +int
49332 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49333 + const int unsafe_share)
49334 +{
49335 + struct task_struct *task = current;
49336 + struct acl_subject_label *newacl;
49337 + struct acl_object_label *obj;
49338 + __u32 retmode;
49339 +
49340 + if (unlikely(!(gr_status & GR_READY)))
49341 + return 0;
49342 +
49343 + newacl = chk_subj_label(dentry, mnt, task->role);
49344 +
49345 + task_lock(task);
49346 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49347 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49348 + !(task->role->roletype & GR_ROLE_GOD) &&
49349 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49350 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49351 + task_unlock(task);
49352 + if (unsafe_share)
49353 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49354 + else
49355 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49356 + return -EACCES;
49357 + }
49358 + task_unlock(task);
49359 +
49360 + obj = chk_obj_label(dentry, mnt, task->acl);
49361 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49362 +
49363 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49364 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49365 + if (obj->nested)
49366 + task->acl = obj->nested;
49367 + else
49368 + task->acl = newacl;
49369 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49370 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49371 +
49372 + task->is_writable = 0;
49373 +
49374 + /* ignore additional mmap checks for processes that are writable
49375 + by the default ACL */
49376 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49377 + if (unlikely(obj->mode & GR_WRITE))
49378 + task->is_writable = 1;
49379 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49380 + if (unlikely(obj->mode & GR_WRITE))
49381 + task->is_writable = 1;
49382 +
49383 + gr_set_proc_res(task);
49384 +
49385 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49386 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49387 +#endif
49388 + return 0;
49389 +}
49390 +
49391 +/* always called with valid inodev ptr */
49392 +static void
49393 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49394 +{
49395 + struct acl_object_label *matchpo;
49396 + struct acl_subject_label *matchps;
49397 + struct acl_subject_label *subj;
49398 + struct acl_role_label *role;
49399 + unsigned int x;
49400 +
49401 + FOR_EACH_ROLE_START(role)
49402 + FOR_EACH_SUBJECT_START(role, subj, x)
49403 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49404 + matchpo->mode |= GR_DELETED;
49405 + FOR_EACH_SUBJECT_END(subj,x)
49406 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49407 + if (subj->inode == ino && subj->device == dev)
49408 + subj->mode |= GR_DELETED;
49409 + FOR_EACH_NESTED_SUBJECT_END(subj)
49410 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49411 + matchps->mode |= GR_DELETED;
49412 + FOR_EACH_ROLE_END(role)
49413 +
49414 + inodev->nentry->deleted = 1;
49415 +
49416 + return;
49417 +}
49418 +
49419 +void
49420 +gr_handle_delete(const ino_t ino, const dev_t dev)
49421 +{
49422 + struct inodev_entry *inodev;
49423 +
49424 + if (unlikely(!(gr_status & GR_READY)))
49425 + return;
49426 +
49427 + write_lock(&gr_inode_lock);
49428 + inodev = lookup_inodev_entry(ino, dev);
49429 + if (inodev != NULL)
49430 + do_handle_delete(inodev, ino, dev);
49431 + write_unlock(&gr_inode_lock);
49432 +
49433 + return;
49434 +}
49435 +
49436 +static void
49437 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49438 + const ino_t newinode, const dev_t newdevice,
49439 + struct acl_subject_label *subj)
49440 +{
49441 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49442 + struct acl_object_label *match;
49443 +
49444 + match = subj->obj_hash[index];
49445 +
49446 + while (match && (match->inode != oldinode ||
49447 + match->device != olddevice ||
49448 + !(match->mode & GR_DELETED)))
49449 + match = match->next;
49450 +
49451 + if (match && (match->inode == oldinode)
49452 + && (match->device == olddevice)
49453 + && (match->mode & GR_DELETED)) {
49454 + if (match->prev == NULL) {
49455 + subj->obj_hash[index] = match->next;
49456 + if (match->next != NULL)
49457 + match->next->prev = NULL;
49458 + } else {
49459 + match->prev->next = match->next;
49460 + if (match->next != NULL)
49461 + match->next->prev = match->prev;
49462 + }
49463 + match->prev = NULL;
49464 + match->next = NULL;
49465 + match->inode = newinode;
49466 + match->device = newdevice;
49467 + match->mode &= ~GR_DELETED;
49468 +
49469 + insert_acl_obj_label(match, subj);
49470 + }
49471 +
49472 + return;
49473 +}
49474 +
49475 +static void
49476 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49477 + const ino_t newinode, const dev_t newdevice,
49478 + struct acl_role_label *role)
49479 +{
49480 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49481 + struct acl_subject_label *match;
49482 +
49483 + match = role->subj_hash[index];
49484 +
49485 + while (match && (match->inode != oldinode ||
49486 + match->device != olddevice ||
49487 + !(match->mode & GR_DELETED)))
49488 + match = match->next;
49489 +
49490 + if (match && (match->inode == oldinode)
49491 + && (match->device == olddevice)
49492 + && (match->mode & GR_DELETED)) {
49493 + if (match->prev == NULL) {
49494 + role->subj_hash[index] = match->next;
49495 + if (match->next != NULL)
49496 + match->next->prev = NULL;
49497 + } else {
49498 + match->prev->next = match->next;
49499 + if (match->next != NULL)
49500 + match->next->prev = match->prev;
49501 + }
49502 + match->prev = NULL;
49503 + match->next = NULL;
49504 + match->inode = newinode;
49505 + match->device = newdevice;
49506 + match->mode &= ~GR_DELETED;
49507 +
49508 + insert_acl_subj_label(match, role);
49509 + }
49510 +
49511 + return;
49512 +}
49513 +
49514 +static void
49515 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49516 + const ino_t newinode, const dev_t newdevice)
49517 +{
49518 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49519 + struct inodev_entry *match;
49520 +
49521 + match = inodev_set.i_hash[index];
49522 +
49523 + while (match && (match->nentry->inode != oldinode ||
49524 + match->nentry->device != olddevice || !match->nentry->deleted))
49525 + match = match->next;
49526 +
49527 + if (match && (match->nentry->inode == oldinode)
49528 + && (match->nentry->device == olddevice) &&
49529 + match->nentry->deleted) {
49530 + if (match->prev == NULL) {
49531 + inodev_set.i_hash[index] = match->next;
49532 + if (match->next != NULL)
49533 + match->next->prev = NULL;
49534 + } else {
49535 + match->prev->next = match->next;
49536 + if (match->next != NULL)
49537 + match->next->prev = match->prev;
49538 + }
49539 + match->prev = NULL;
49540 + match->next = NULL;
49541 + match->nentry->inode = newinode;
49542 + match->nentry->device = newdevice;
49543 + match->nentry->deleted = 0;
49544 +
49545 + insert_inodev_entry(match);
49546 + }
49547 +
49548 + return;
49549 +}
49550 +
49551 +static void
49552 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
49553 +{
49554 + struct acl_subject_label *subj;
49555 + struct acl_role_label *role;
49556 + unsigned int x;
49557 +
49558 + FOR_EACH_ROLE_START(role)
49559 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
49560 +
49561 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49562 + if ((subj->inode == ino) && (subj->device == dev)) {
49563 + subj->inode = ino;
49564 + subj->device = dev;
49565 + }
49566 + FOR_EACH_NESTED_SUBJECT_END(subj)
49567 + FOR_EACH_SUBJECT_START(role, subj, x)
49568 + update_acl_obj_label(matchn->inode, matchn->device,
49569 + ino, dev, subj);
49570 + FOR_EACH_SUBJECT_END(subj,x)
49571 + FOR_EACH_ROLE_END(role)
49572 +
49573 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
49574 +
49575 + return;
49576 +}
49577 +
49578 +static void
49579 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49580 + const struct vfsmount *mnt)
49581 +{
49582 + ino_t ino = dentry->d_inode->i_ino;
49583 + dev_t dev = __get_dev(dentry);
49584 +
49585 + __do_handle_create(matchn, ino, dev);
49586 +
49587 + return;
49588 +}
49589 +
49590 +void
49591 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49592 +{
49593 + struct name_entry *matchn;
49594 +
49595 + if (unlikely(!(gr_status & GR_READY)))
49596 + return;
49597 +
49598 + preempt_disable();
49599 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49600 +
49601 + if (unlikely((unsigned long)matchn)) {
49602 + write_lock(&gr_inode_lock);
49603 + do_handle_create(matchn, dentry, mnt);
49604 + write_unlock(&gr_inode_lock);
49605 + }
49606 + preempt_enable();
49607 +
49608 + return;
49609 +}
49610 +
49611 +void
49612 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
49613 +{
49614 + struct name_entry *matchn;
49615 +
49616 + if (unlikely(!(gr_status & GR_READY)))
49617 + return;
49618 +
49619 + preempt_disable();
49620 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
49621 +
49622 + if (unlikely((unsigned long)matchn)) {
49623 + write_lock(&gr_inode_lock);
49624 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
49625 + write_unlock(&gr_inode_lock);
49626 + }
49627 + preempt_enable();
49628 +
49629 + return;
49630 +}
49631 +
49632 +void
49633 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49634 + struct dentry *old_dentry,
49635 + struct dentry *new_dentry,
49636 + struct vfsmount *mnt, const __u8 replace)
49637 +{
49638 + struct name_entry *matchn;
49639 + struct inodev_entry *inodev;
49640 + ino_t old_ino = old_dentry->d_inode->i_ino;
49641 + dev_t old_dev = __get_dev(old_dentry);
49642 +
49643 +	/* vfs_rename swaps the name and parent link for old_dentry and
49644 +	   new_dentry.
49645 +	   At this point, old_dentry has the new name, parent link, and inode
49646 +	   for the renamed file.
49647 +	   If a file is being replaced by a rename, new_dentry has the inode
49648 +	   and name for the replaced file.
49649 +	*/
49650 +
49651 + if (unlikely(!(gr_status & GR_READY)))
49652 + return;
49653 +
49654 + preempt_disable();
49655 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49656 +
49657 + /* we wouldn't have to check d_inode if it weren't for
49658 + NFS silly-renaming
49659 + */
49660 +
49661 + write_lock(&gr_inode_lock);
49662 + if (unlikely(replace && new_dentry->d_inode)) {
49663 + ino_t new_ino = new_dentry->d_inode->i_ino;
49664 + dev_t new_dev = __get_dev(new_dentry);
49665 +
49666 + inodev = lookup_inodev_entry(new_ino, new_dev);
49667 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49668 + do_handle_delete(inodev, new_ino, new_dev);
49669 + }
49670 +
49671 + inodev = lookup_inodev_entry(old_ino, old_dev);
49672 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49673 + do_handle_delete(inodev, old_ino, old_dev);
49674 +
49675 + if (unlikely((unsigned long)matchn))
49676 + do_handle_create(matchn, old_dentry, mnt);
49677 +
49678 + write_unlock(&gr_inode_lock);
49679 + preempt_enable();
49680 +
49681 + return;
49682 +}
49683 +
49684 +static int
49685 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49686 + unsigned char **sum)
49687 +{
49688 + struct acl_role_label *r;
49689 + struct role_allowed_ip *ipp;
49690 + struct role_transition *trans;
49691 + unsigned int i;
49692 + int found = 0;
49693 + u32 curr_ip = current->signal->curr_ip;
49694 +
49695 + current->signal->saved_ip = curr_ip;
49696 +
49697 + /* check transition table */
49698 +
49699 + for (trans = current->role->transitions; trans; trans = trans->next) {
49700 + if (!strcmp(rolename, trans->rolename)) {
49701 + found = 1;
49702 + break;
49703 + }
49704 + }
49705 +
49706 + if (!found)
49707 + return 0;
49708 +
49709 + /* handle special roles that do not require authentication
49710 + and check ip */
49711 +
49712 + FOR_EACH_ROLE_START(r)
49713 + if (!strcmp(rolename, r->rolename) &&
49714 + (r->roletype & GR_ROLE_SPECIAL)) {
49715 + found = 0;
49716 + if (r->allowed_ips != NULL) {
49717 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49718 + if ((ntohl(curr_ip) & ipp->netmask) ==
49719 + (ntohl(ipp->addr) & ipp->netmask))
49720 + found = 1;
49721 + }
49722 + } else
49723 + found = 2;
49724 + if (!found)
49725 + return 0;
49726 +
49727 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49728 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49729 + *salt = NULL;
49730 + *sum = NULL;
49731 + return 1;
49732 + }
49733 + }
49734 + FOR_EACH_ROLE_END(r)
49735 +
49736 + for (i = 0; i < num_sprole_pws; i++) {
49737 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49738 + *salt = acl_special_roles[i]->salt;
49739 + *sum = acl_special_roles[i]->sum;
49740 + return 1;
49741 + }
49742 + }
49743 +
49744 + return 0;
49745 +}
49746 +
49747 +static void
49748 +assign_special_role(char *rolename)
49749 +{
49750 + struct acl_object_label *obj;
49751 + struct acl_role_label *r;
49752 + struct acl_role_label *assigned = NULL;
49753 + struct task_struct *tsk;
49754 + struct file *filp;
49755 +
49756 + FOR_EACH_ROLE_START(r)
49757 + if (!strcmp(rolename, r->rolename) &&
49758 + (r->roletype & GR_ROLE_SPECIAL)) {
49759 + assigned = r;
49760 + break;
49761 + }
49762 + FOR_EACH_ROLE_END(r)
49763 +
49764 + if (!assigned)
49765 + return;
49766 +
49767 + read_lock(&tasklist_lock);
49768 + read_lock(&grsec_exec_file_lock);
49769 +
49770 + tsk = current->real_parent;
49771 + if (tsk == NULL)
49772 + goto out_unlock;
49773 +
49774 + filp = tsk->exec_file;
49775 + if (filp == NULL)
49776 + goto out_unlock;
49777 +
49778 + tsk->is_writable = 0;
49779 +
49780 + tsk->acl_sp_role = 1;
49781 + tsk->acl_role_id = ++acl_sp_role_value;
49782 + tsk->role = assigned;
49783 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49784 +
49785 + /* ignore additional mmap checks for processes that are writable
49786 + by the default ACL */
49787 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49788 + if (unlikely(obj->mode & GR_WRITE))
49789 + tsk->is_writable = 1;
49790 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49791 + if (unlikely(obj->mode & GR_WRITE))
49792 + tsk->is_writable = 1;
49793 +
49794 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49795 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49796 +#endif
49797 +
49798 +out_unlock:
49799 + read_unlock(&grsec_exec_file_lock);
49800 + read_unlock(&tasklist_lock);
49801 + return;
49802 +}
49803 +
49804 +int gr_check_secure_terminal(struct task_struct *task)
49805 +{
49806 + struct task_struct *p, *p2, *p3;
49807 + struct files_struct *files;
49808 + struct fdtable *fdt;
49809 + struct file *our_file = NULL, *file;
49810 + int i;
49811 +
49812 + if (task->signal->tty == NULL)
49813 + return 1;
49814 +
49815 + files = get_files_struct(task);
49816 + if (files != NULL) {
49817 + rcu_read_lock();
49818 + fdt = files_fdtable(files);
49819 + for (i=0; i < fdt->max_fds; i++) {
49820 + file = fcheck_files(files, i);
49821 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49822 + get_file(file);
49823 + our_file = file;
49824 + }
49825 + }
49826 + rcu_read_unlock();
49827 + put_files_struct(files);
49828 + }
49829 +
49830 + if (our_file == NULL)
49831 + return 1;
49832 +
49833 + read_lock(&tasklist_lock);
49834 + do_each_thread(p2, p) {
49835 + files = get_files_struct(p);
49836 + if (files == NULL ||
49837 + (p->signal && p->signal->tty == task->signal->tty)) {
49838 + if (files != NULL)
49839 + put_files_struct(files);
49840 + continue;
49841 + }
49842 + rcu_read_lock();
49843 + fdt = files_fdtable(files);
49844 + for (i=0; i < fdt->max_fds; i++) {
49845 + file = fcheck_files(files, i);
49846 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49847 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49848 + p3 = task;
49849 + while (p3->pid > 0) {
49850 + if (p3 == p)
49851 + break;
49852 + p3 = p3->real_parent;
49853 + }
49854 + if (p3 == p)
49855 + break;
49856 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49857 + gr_handle_alertkill(p);
49858 + rcu_read_unlock();
49859 + put_files_struct(files);
49860 + read_unlock(&tasklist_lock);
49861 + fput(our_file);
49862 + return 0;
49863 + }
49864 + }
49865 + rcu_read_unlock();
49866 + put_files_struct(files);
49867 + } while_each_thread(p2, p);
49868 + read_unlock(&tasklist_lock);
49869 +
49870 + fput(our_file);
49871 + return 1;
49872 +}
49873 +
49874 +ssize_t
49875 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49876 +{
49877 + struct gr_arg_wrapper uwrap;
49878 + unsigned char *sprole_salt = NULL;
49879 + unsigned char *sprole_sum = NULL;
49880 + int error = sizeof (struct gr_arg_wrapper);
49881 + int error2 = 0;
49882 +
49883 + mutex_lock(&gr_dev_mutex);
49884 +
49885 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49886 + error = -EPERM;
49887 + goto out;
49888 + }
49889 +
49890 + if (count != sizeof (struct gr_arg_wrapper)) {
49891 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49892 + error = -EINVAL;
49893 + goto out;
49894 + }
49895 +
49896 +
49897 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49898 + gr_auth_expires = 0;
49899 + gr_auth_attempts = 0;
49900 + }
49901 +
49902 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49903 + error = -EFAULT;
49904 + goto out;
49905 + }
49906 +
49907 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49908 + error = -EINVAL;
49909 + goto out;
49910 + }
49911 +
49912 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49913 + error = -EFAULT;
49914 + goto out;
49915 + }
49916 +
49917 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49918 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49919 + time_after(gr_auth_expires, get_seconds())) {
49920 + error = -EBUSY;
49921 + goto out;
49922 + }
49923 +
49924 +	/* if a non-root user is trying to do anything other than use a special role,
49925 +	   do not attempt authentication and do not count it towards authentication
49926 +	   lockout
49927 +	 */
49928 +
49929 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49930 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49931 + current_uid()) {
49932 + error = -EPERM;
49933 + goto out;
49934 + }
49935 +
49936 + /* ensure pw and special role name are null terminated */
49937 +
49938 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49939 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49940 +
49941 + /* Okay.
49942 + * We have our enough of the argument structure..(we have yet
49943 + * to copy_from_user the tables themselves) . Copy the tables
49944 + * only if we need them, i.e. for loading operations. */
49945 +
49946 + switch (gr_usermode->mode) {
49947 + case GR_STATUS:
49948 + if (gr_status & GR_READY) {
49949 + error = 1;
49950 + if (!gr_check_secure_terminal(current))
49951 + error = 3;
49952 + } else
49953 + error = 2;
49954 + goto out;
49955 + case GR_SHUTDOWN:
49956 + if ((gr_status & GR_READY)
49957 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49958 + pax_open_kernel();
49959 + gr_status &= ~GR_READY;
49960 + pax_close_kernel();
49961 +
49962 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49963 + free_variables();
49964 + memset(gr_usermode, 0, sizeof (struct gr_arg));
49965 + memset(gr_system_salt, 0, GR_SALT_LEN);
49966 + memset(gr_system_sum, 0, GR_SHA_LEN);
49967 + } else if (gr_status & GR_READY) {
49968 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49969 + error = -EPERM;
49970 + } else {
49971 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49972 + error = -EAGAIN;
49973 + }
49974 + break;
49975 + case GR_ENABLE:
49976 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49977 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49978 + else {
49979 + if (gr_status & GR_READY)
49980 + error = -EAGAIN;
49981 + else
49982 + error = error2;
49983 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49984 + }
49985 + break;
49986 + case GR_RELOAD:
49987 + if (!(gr_status & GR_READY)) {
49988 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49989 + error = -EAGAIN;
49990 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49991 + preempt_disable();
49992 +
49993 + pax_open_kernel();
49994 + gr_status &= ~GR_READY;
49995 + pax_close_kernel();
49996 +
49997 + free_variables();
49998 + if (!(error2 = gracl_init(gr_usermode))) {
49999 + preempt_enable();
50000 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50001 + } else {
50002 + preempt_enable();
50003 + error = error2;
50004 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50005 + }
50006 + } else {
50007 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50008 + error = -EPERM;
50009 + }
50010 + break;
50011 + case GR_SEGVMOD:
50012 + if (unlikely(!(gr_status & GR_READY))) {
50013 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50014 + error = -EAGAIN;
50015 + break;
50016 + }
50017 +
50018 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50019 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50020 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50021 + struct acl_subject_label *segvacl;
50022 + segvacl =
50023 + lookup_acl_subj_label(gr_usermode->segv_inode,
50024 + gr_usermode->segv_device,
50025 + current->role);
50026 + if (segvacl) {
50027 + segvacl->crashes = 0;
50028 + segvacl->expires = 0;
50029 + }
50030 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50031 + gr_remove_uid(gr_usermode->segv_uid);
50032 + }
50033 + } else {
50034 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50035 + error = -EPERM;
50036 + }
50037 + break;
50038 + case GR_SPROLE:
50039 + case GR_SPROLEPAM:
50040 + if (unlikely(!(gr_status & GR_READY))) {
50041 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50042 + error = -EAGAIN;
50043 + break;
50044 + }
50045 +
50046 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50047 + current->role->expires = 0;
50048 + current->role->auth_attempts = 0;
50049 + }
50050 +
50051 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50052 + time_after(current->role->expires, get_seconds())) {
50053 + error = -EBUSY;
50054 + goto out;
50055 + }
50056 +
50057 + if (lookup_special_role_auth
50058 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50059 + && ((!sprole_salt && !sprole_sum)
50060 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50061 + char *p = "";
50062 + assign_special_role(gr_usermode->sp_role);
50063 + read_lock(&tasklist_lock);
50064 + if (current->real_parent)
50065 + p = current->real_parent->role->rolename;
50066 + read_unlock(&tasklist_lock);
50067 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50068 + p, acl_sp_role_value);
50069 + } else {
50070 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50071 + error = -EPERM;
50072 + if(!(current->role->auth_attempts++))
50073 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50074 +
50075 + goto out;
50076 + }
50077 + break;
50078 + case GR_UNSPROLE:
50079 + if (unlikely(!(gr_status & GR_READY))) {
50080 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50081 + error = -EAGAIN;
50082 + break;
50083 + }
50084 +
50085 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50086 + char *p = "";
50087 + int i = 0;
50088 +
50089 + read_lock(&tasklist_lock);
50090 + if (current->real_parent) {
50091 + p = current->real_parent->role->rolename;
50092 + i = current->real_parent->acl_role_id;
50093 + }
50094 + read_unlock(&tasklist_lock);
50095 +
50096 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50097 + gr_set_acls(1);
50098 + } else {
50099 + error = -EPERM;
50100 + goto out;
50101 + }
50102 + break;
50103 + default:
50104 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50105 + error = -EINVAL;
50106 + break;
50107 + }
50108 +
50109 + if (error != -EPERM)
50110 + goto out;
50111 +
50112 + if(!(gr_auth_attempts++))
50113 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50114 +
50115 + out:
50116 + mutex_unlock(&gr_dev_mutex);
50117 + return error;
50118 +}
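The failed-authentication handling in write_grsec_handler() above hinges on two globals: the first failure opens a CONFIG_GRKERNSEC_ACL_TIMEOUT window via gr_auth_expires, and once gr_auth_attempts reaches CONFIG_GRKERNSEC_ACL_MAXTRIES every request inside that window is rejected with -EBUSY until the window lapses and both counters reset. A minimal userspace sketch of the same bookkeeping, assuming nothing beyond standard C; the *_demo identifiers and the two constants are illustrative stand-ins, not values taken from the patch.

#include <stdio.h>
#include <time.h>

#define MAXTRIES_DEMO 3   /* stands in for CONFIG_GRKERNSEC_ACL_MAXTRIES */
#define TIMEOUT_DEMO  30  /* stands in for CONFIG_GRKERNSEC_ACL_TIMEOUT (seconds) */

static unsigned long auth_attempts_demo;
static unsigned long auth_expires_demo;

static int auth_allowed_demo(time_t now)
{
	/* window elapsed: forget earlier failures */
	if (auth_expires_demo && now >= (time_t)auth_expires_demo) {
		auth_expires_demo = 0;
		auth_attempts_demo = 0;
	}
	/* too many failures and still inside the window: refuse (-EBUSY above) */
	if (auth_attempts_demo >= MAXTRIES_DEMO && now < (time_t)auth_expires_demo)
		return 0;
	return 1;
}

static void auth_failed_demo(time_t now)
{
	/* only the first failure opens the lockout window */
	if (!(auth_attempts_demo++))
		auth_expires_demo = now + TIMEOUT_DEMO;
}

int main(void)
{
	time_t now = time(NULL);
	int i;

	for (i = 0; i < 5; i++) {
		printf("attempt %d: %s\n", i,
		       auth_allowed_demo(now) ? "allowed" : "locked out");
		auth_failed_demo(now);
	}
	return 0;
}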
50119 +
50120 +/* must be called with
50121 + rcu_read_lock();
50122 + read_lock(&tasklist_lock);
50123 + read_lock(&grsec_exec_file_lock);
50124 +*/
50125 +int gr_apply_subject_to_task(struct task_struct *task)
50126 +{
50127 + struct acl_object_label *obj;
50128 + char *tmpname;
50129 + struct acl_subject_label *tmpsubj;
50130 + struct file *filp;
50131 + struct name_entry *nmatch;
50132 +
50133 + filp = task->exec_file;
50134 + if (filp == NULL)
50135 + return 0;
50136 +
50137 +	/* the following is to apply the correct subject
50138 +	   to binaries already running when the RBAC system
50139 +	   is enabled and that have been
50140 +	   replaced or deleted since their execution
50141 +	   -----
50142 +	   when the RBAC system starts, the inode/dev
50143 +	   from exec_file will be one that the RBAC system
50144 +	   is unaware of.  It only knows the inode/dev
50145 +	   of the file present on disk, or the absence
50146 +	   of it.
50147 +	*/
50148 + preempt_disable();
50149 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50150 +
50151 + nmatch = lookup_name_entry(tmpname);
50152 + preempt_enable();
50153 + tmpsubj = NULL;
50154 + if (nmatch) {
50155 + if (nmatch->deleted)
50156 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50157 + else
50158 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50159 + if (tmpsubj != NULL)
50160 + task->acl = tmpsubj;
50161 + }
50162 + if (tmpsubj == NULL)
50163 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50164 + task->role);
50165 + if (task->acl) {
50166 + task->is_writable = 0;
50167 + /* ignore additional mmap checks for processes that are writable
50168 + by the default ACL */
50169 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50170 + if (unlikely(obj->mode & GR_WRITE))
50171 + task->is_writable = 1;
50172 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50173 + if (unlikely(obj->mode & GR_WRITE))
50174 + task->is_writable = 1;
50175 +
50176 + gr_set_proc_res(task);
50177 +
50178 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50179 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50180 +#endif
50181 + } else {
50182 + return 1;
50183 + }
50184 +
50185 + return 0;
50186 +}
50187 +
50188 +int
50189 +gr_set_acls(const int type)
50190 +{
50191 + struct task_struct *task, *task2;
50192 + struct acl_role_label *role = current->role;
50193 + __u16 acl_role_id = current->acl_role_id;
50194 + const struct cred *cred;
50195 + int ret;
50196 +
50197 + rcu_read_lock();
50198 + read_lock(&tasklist_lock);
50199 + read_lock(&grsec_exec_file_lock);
50200 + do_each_thread(task2, task) {
50201 + /* check to see if we're called from the exit handler,
50202 + if so, only replace ACLs that have inherited the admin
50203 + ACL */
50204 +
50205 + if (type && (task->role != role ||
50206 + task->acl_role_id != acl_role_id))
50207 + continue;
50208 +
50209 + task->acl_role_id = 0;
50210 + task->acl_sp_role = 0;
50211 +
50212 + if (task->exec_file) {
50213 + cred = __task_cred(task);
50214 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50215 + ret = gr_apply_subject_to_task(task);
50216 + if (ret) {
50217 + read_unlock(&grsec_exec_file_lock);
50218 + read_unlock(&tasklist_lock);
50219 + rcu_read_unlock();
50220 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50221 + return ret;
50222 + }
50223 + } else {
50224 + // it's a kernel process
50225 + task->role = kernel_role;
50226 + task->acl = kernel_role->root_label;
50227 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50228 + task->acl->mode &= ~GR_PROCFIND;
50229 +#endif
50230 + }
50231 + } while_each_thread(task2, task);
50232 + read_unlock(&grsec_exec_file_lock);
50233 + read_unlock(&tasklist_lock);
50234 + rcu_read_unlock();
50235 +
50236 + return 0;
50237 +}
50238 +
50239 +void
50240 +gr_learn_resource(const struct task_struct *task,
50241 + const int res, const unsigned long wanted, const int gt)
50242 +{
50243 + struct acl_subject_label *acl;
50244 + const struct cred *cred;
50245 +
50246 + if (unlikely((gr_status & GR_READY) &&
50247 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50248 + goto skip_reslog;
50249 +
50250 +#ifdef CONFIG_GRKERNSEC_RESLOG
50251 + gr_log_resource(task, res, wanted, gt);
50252 +#endif
50253 + skip_reslog:
50254 +
50255 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50256 + return;
50257 +
50258 + acl = task->acl;
50259 +
50260 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50261 + !(acl->resmask & (1 << (unsigned short) res))))
50262 + return;
50263 +
50264 + if (wanted >= acl->res[res].rlim_cur) {
50265 + unsigned long res_add;
50266 +
50267 + res_add = wanted;
50268 + switch (res) {
50269 + case RLIMIT_CPU:
50270 + res_add += GR_RLIM_CPU_BUMP;
50271 + break;
50272 + case RLIMIT_FSIZE:
50273 + res_add += GR_RLIM_FSIZE_BUMP;
50274 + break;
50275 + case RLIMIT_DATA:
50276 + res_add += GR_RLIM_DATA_BUMP;
50277 + break;
50278 + case RLIMIT_STACK:
50279 + res_add += GR_RLIM_STACK_BUMP;
50280 + break;
50281 + case RLIMIT_CORE:
50282 + res_add += GR_RLIM_CORE_BUMP;
50283 + break;
50284 + case RLIMIT_RSS:
50285 + res_add += GR_RLIM_RSS_BUMP;
50286 + break;
50287 + case RLIMIT_NPROC:
50288 + res_add += GR_RLIM_NPROC_BUMP;
50289 + break;
50290 + case RLIMIT_NOFILE:
50291 + res_add += GR_RLIM_NOFILE_BUMP;
50292 + break;
50293 + case RLIMIT_MEMLOCK:
50294 + res_add += GR_RLIM_MEMLOCK_BUMP;
50295 + break;
50296 + case RLIMIT_AS:
50297 + res_add += GR_RLIM_AS_BUMP;
50298 + break;
50299 + case RLIMIT_LOCKS:
50300 + res_add += GR_RLIM_LOCKS_BUMP;
50301 + break;
50302 + case RLIMIT_SIGPENDING:
50303 + res_add += GR_RLIM_SIGPENDING_BUMP;
50304 + break;
50305 + case RLIMIT_MSGQUEUE:
50306 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50307 + break;
50308 + case RLIMIT_NICE:
50309 + res_add += GR_RLIM_NICE_BUMP;
50310 + break;
50311 + case RLIMIT_RTPRIO:
50312 + res_add += GR_RLIM_RTPRIO_BUMP;
50313 + break;
50314 + case RLIMIT_RTTIME:
50315 + res_add += GR_RLIM_RTTIME_BUMP;
50316 + break;
50317 + }
50318 +
50319 + acl->res[res].rlim_cur = res_add;
50320 +
50321 + if (wanted > acl->res[res].rlim_max)
50322 + acl->res[res].rlim_max = res_add;
50323 +
50324 + /* only log the subject filename, since resource logging is supported for
50325 + single-subject learning only */
50326 + rcu_read_lock();
50327 + cred = __task_cred(task);
50328 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50329 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50330 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50331 + "", (unsigned long) res, &task->signal->saved_ip);
50332 + rcu_read_unlock();
50333 + }
50334 +
50335 + return;
50336 +}
50337 +
50338 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50339 +void
50340 +pax_set_initial_flags(struct linux_binprm *bprm)
50341 +{
50342 + struct task_struct *task = current;
50343 + struct acl_subject_label *proc;
50344 + unsigned long flags;
50345 +
50346 + if (unlikely(!(gr_status & GR_READY)))
50347 + return;
50348 +
50349 + flags = pax_get_flags(task);
50350 +
50351 + proc = task->acl;
50352 +
50353 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50354 + flags &= ~MF_PAX_PAGEEXEC;
50355 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50356 + flags &= ~MF_PAX_SEGMEXEC;
50357 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50358 + flags &= ~MF_PAX_RANDMMAP;
50359 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50360 + flags &= ~MF_PAX_EMUTRAMP;
50361 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50362 + flags &= ~MF_PAX_MPROTECT;
50363 +
50364 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50365 + flags |= MF_PAX_PAGEEXEC;
50366 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50367 + flags |= MF_PAX_SEGMEXEC;
50368 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50369 + flags |= MF_PAX_RANDMMAP;
50370 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50371 + flags |= MF_PAX_EMUTRAMP;
50372 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50373 + flags |= MF_PAX_MPROTECT;
50374 +
50375 + pax_set_flags(task, flags);
50376 +
50377 + return;
50378 +}
50379 +#endif
50380 +
50381 +#ifdef CONFIG_SYSCTL
50382 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50383 + system to save 35kb of memory */
50384 +
50385 +/* we modify the passed in filename, but adjust it back before returning */
50386 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50387 +{
50388 + struct name_entry *nmatch;
50389 + char *p, *lastp = NULL;
50390 + struct acl_object_label *obj = NULL, *tmp;
50391 + struct acl_subject_label *tmpsubj;
50392 + char c = '\0';
50393 +
50394 + read_lock(&gr_inode_lock);
50395 +
50396 + p = name + len - 1;
50397 + do {
50398 + nmatch = lookup_name_entry(name);
50399 + if (lastp != NULL)
50400 + *lastp = c;
50401 +
50402 + if (nmatch == NULL)
50403 + goto next_component;
50404 + tmpsubj = current->acl;
50405 + do {
50406 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50407 + if (obj != NULL) {
50408 + tmp = obj->globbed;
50409 + while (tmp) {
50410 + if (!glob_match(tmp->filename, name)) {
50411 + obj = tmp;
50412 + goto found_obj;
50413 + }
50414 + tmp = tmp->next;
50415 + }
50416 + goto found_obj;
50417 + }
50418 + } while ((tmpsubj = tmpsubj->parent_subject));
50419 +next_component:
50420 + /* end case */
50421 + if (p == name)
50422 + break;
50423 +
50424 + while (*p != '/')
50425 + p--;
50426 + if (p == name)
50427 + lastp = p + 1;
50428 + else {
50429 + lastp = p;
50430 + p--;
50431 + }
50432 + c = *lastp;
50433 + *lastp = '\0';
50434 + } while (1);
50435 +found_obj:
50436 + read_unlock(&gr_inode_lock);
50437 + /* obj returned will always be non-null */
50438 + return obj;
50439 +}
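gr_lookup_by_name() above performs a longest-prefix match against the loaded policy: it tries the full path first, then repeatedly NUL-terminates the buffer one component earlier and retries, restoring each clobbered byte so the caller's string is intact when the function returns. A standalone sketch of that truncate-and-retry walk; lookup_demo() stands in for lookup_name_entry(), every *_demo name is hypothetical, and unlike the kernel code it keeps walking after a hit instead of breaking out, so the byte-restore path is exercised for every component.

#include <stdio.h>
#include <string.h>

/* pretend only this prefix has a policy object attached */
static int lookup_demo(const char *name)
{
	return strcmp(name, "/proc/sys/kernel") == 0;
}

static void longest_match_demo(char *name)
{
	char *p = name + strlen(name) - 1;
	char *lastp = NULL;
	char c = '\0';

	do {
		if (lookup_demo(name))
			printf("matched: %s\n", name);
		if (lastp != NULL)
			*lastp = c;	/* restore the byte we truncated */

		if (p == name)
			break;
		while (*p != '/')
			p--;
		if (p == name)
			lastp = p + 1;
		else {
			lastp = p;
			p--;
		}
		c = *lastp;
		*lastp = '\0';		/* drop one trailing component */
	} while (1);
}

int main(void)
{
	char path[] = "/proc/sys/kernel/modprobe";

	longest_match_demo(path);		/* prints "matched: /proc/sys/kernel" */
	printf("buffer restored: %s\n", path);
	return 0;
}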
50440 +
50441 +/* returns 0 when allowing, non-zero on error
50442 + op of 0 is used for readdir, so we don't log the names of hidden files
50443 +*/
50444 +__u32
50445 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50446 +{
50447 + struct ctl_table *tmp;
50448 + const char *proc_sys = "/proc/sys";
50449 + char *path;
50450 + struct acl_object_label *obj;
50451 + unsigned short len = 0, pos = 0, depth = 0, i;
50452 + __u32 err = 0;
50453 + __u32 mode = 0;
50454 +
50455 + if (unlikely(!(gr_status & GR_READY)))
50456 + return 0;
50457 +
50458 + /* for now, ignore operations on non-sysctl entries if it's not a
50459 +	   readdir */
50460 + if (table->child != NULL && op != 0)
50461 + return 0;
50462 +
50463 + mode |= GR_FIND;
50464 + /* it's only a read if it's an entry, read on dirs is for readdir */
50465 + if (op & MAY_READ)
50466 + mode |= GR_READ;
50467 + if (op & MAY_WRITE)
50468 + mode |= GR_WRITE;
50469 +
50470 + preempt_disable();
50471 +
50472 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50473 +
50474 + /* it's only a read/write if it's an actual entry, not a dir
50475 +	   (dirs are opened for readdir)
50476 + */
50477 +
50478 + /* convert the requested sysctl entry into a pathname */
50479 +
50480 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50481 + len += strlen(tmp->procname);
50482 + len++;
50483 + depth++;
50484 + }
50485 +
50486 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50487 + /* deny */
50488 + goto out;
50489 + }
50490 +
50491 + memset(path, 0, PAGE_SIZE);
50492 +
50493 + memcpy(path, proc_sys, strlen(proc_sys));
50494 +
50495 + pos += strlen(proc_sys);
50496 +
50497 + for (; depth > 0; depth--) {
50498 + path[pos] = '/';
50499 + pos++;
50500 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50501 + if (depth == i) {
50502 + memcpy(path + pos, tmp->procname,
50503 + strlen(tmp->procname));
50504 + pos += strlen(tmp->procname);
50505 + }
50506 + i++;
50507 + }
50508 + }
50509 +
50510 + obj = gr_lookup_by_name(path, pos);
50511 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50512 +
50513 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50514 + ((err & mode) != mode))) {
50515 + __u32 new_mode = mode;
50516 +
50517 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50518 +
50519 + err = 0;
50520 + gr_log_learn_sysctl(path, new_mode);
50521 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50522 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50523 + err = -ENOENT;
50524 + } else if (!(err & GR_FIND)) {
50525 + err = -ENOENT;
50526 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50527 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50528 + path, (mode & GR_READ) ? " reading" : "",
50529 + (mode & GR_WRITE) ? " writing" : "");
50530 + err = -EACCES;
50531 + } else if ((err & mode) != mode) {
50532 + err = -EACCES;
50533 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50534 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50535 + path, (mode & GR_READ) ? " reading" : "",
50536 + (mode & GR_WRITE) ? " writing" : "");
50537 + err = 0;
50538 + } else
50539 + err = 0;
50540 +
50541 + out:
50542 + preempt_enable();
50543 +
50544 + return err;
50545 +}
50546 +#endif
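gr_handle_sysctl() above rebuilds the "/proc/sys/..." pathname in two passes: a first walk up the ctl_table parent chain counts the components, and the emit loop then writes the component whose position matches the current depth, so the root-most name comes out first and the leaf last. A compact sketch of the same pattern over a toy parent-linked list; the *_demo names are hypothetical and snprintf stands in for the manual memcpy/pos bookkeeping.

#include <stdio.h>

struct entry_demo {
	const char *procname;
	struct entry_demo *parent;
};

static void build_path_demo(const struct entry_demo *table, char *path, size_t size)
{
	const struct entry_demo *tmp;
	size_t pos = 0;
	unsigned int depth = 0, i;

	pos += snprintf(path + pos, size - pos, "/proc/sys");

	/* pass 1: how many components are there? */
	for (tmp = table; tmp != NULL; tmp = tmp->parent)
		depth++;

	/* pass 2: emit components root-first by matching their depth */
	for (; depth > 0; depth--)
		for (i = 1, tmp = table; tmp != NULL; tmp = tmp->parent, i++)
			if (i == depth)
				pos += snprintf(path + pos, size - pos, "/%s", tmp->procname);
}

int main(void)
{
	struct entry_demo kernel   = { "kernel", NULL };
	struct entry_demo modprobe = { "modprobe", &kernel };
	char path[128];

	build_path_demo(&modprobe, path, sizeof(path));
	printf("%s\n", path);	/* /proc/sys/kernel/modprobe */
	return 0;
}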
50547 +
50548 +int
50549 +gr_handle_proc_ptrace(struct task_struct *task)
50550 +{
50551 + struct file *filp;
50552 + struct task_struct *tmp = task;
50553 + struct task_struct *curtemp = current;
50554 + __u32 retmode;
50555 +
50556 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50557 + if (unlikely(!(gr_status & GR_READY)))
50558 + return 0;
50559 +#endif
50560 +
50561 + read_lock(&tasklist_lock);
50562 + read_lock(&grsec_exec_file_lock);
50563 + filp = task->exec_file;
50564 +
50565 + while (tmp->pid > 0) {
50566 + if (tmp == curtemp)
50567 + break;
50568 + tmp = tmp->real_parent;
50569 + }
50570 +
50571 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50572 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50573 + read_unlock(&grsec_exec_file_lock);
50574 + read_unlock(&tasklist_lock);
50575 + return 1;
50576 + }
50577 +
50578 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50579 + if (!(gr_status & GR_READY)) {
50580 + read_unlock(&grsec_exec_file_lock);
50581 + read_unlock(&tasklist_lock);
50582 + return 0;
50583 + }
50584 +#endif
50585 +
50586 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50587 + read_unlock(&grsec_exec_file_lock);
50588 + read_unlock(&tasklist_lock);
50589 +
50590 + if (retmode & GR_NOPTRACE)
50591 + return 1;
50592 +
50593 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50594 + && (current->acl != task->acl || (current->acl != current->role->root_label
50595 + && current->pid != task->pid)))
50596 + return 1;
50597 +
50598 + return 0;
50599 +}
50600 +
50601 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50602 +{
50603 + if (unlikely(!(gr_status & GR_READY)))
50604 + return;
50605 +
50606 + if (!(current->role->roletype & GR_ROLE_GOD))
50607 + return;
50608 +
50609 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50610 + p->role->rolename, gr_task_roletype_to_char(p),
50611 + p->acl->filename);
50612 +}
50613 +
50614 +int
50615 +gr_handle_ptrace(struct task_struct *task, const long request)
50616 +{
50617 + struct task_struct *tmp = task;
50618 + struct task_struct *curtemp = current;
50619 + __u32 retmode;
50620 +
50621 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50622 + if (unlikely(!(gr_status & GR_READY)))
50623 + return 0;
50624 +#endif
50625 +
50626 + read_lock(&tasklist_lock);
50627 + while (tmp->pid > 0) {
50628 + if (tmp == curtemp)
50629 + break;
50630 + tmp = tmp->real_parent;
50631 + }
50632 +
50633 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50634 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50635 + read_unlock(&tasklist_lock);
50636 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50637 + return 1;
50638 + }
50639 + read_unlock(&tasklist_lock);
50640 +
50641 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50642 + if (!(gr_status & GR_READY))
50643 + return 0;
50644 +#endif
50645 +
50646 + read_lock(&grsec_exec_file_lock);
50647 + if (unlikely(!task->exec_file)) {
50648 + read_unlock(&grsec_exec_file_lock);
50649 + return 0;
50650 + }
50651 +
50652 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50653 + read_unlock(&grsec_exec_file_lock);
50654 +
50655 + if (retmode & GR_NOPTRACE) {
50656 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50657 + return 1;
50658 + }
50659 +
50660 + if (retmode & GR_PTRACERD) {
50661 + switch (request) {
50662 + case PTRACE_POKETEXT:
50663 + case PTRACE_POKEDATA:
50664 + case PTRACE_POKEUSR:
50665 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50666 + case PTRACE_SETREGS:
50667 + case PTRACE_SETFPREGS:
50668 +#endif
50669 +#ifdef CONFIG_X86
50670 + case PTRACE_SETFPXREGS:
50671 +#endif
50672 +#ifdef CONFIG_ALTIVEC
50673 + case PTRACE_SETVRREGS:
50674 +#endif
50675 + return 1;
50676 + default:
50677 + return 0;
50678 + }
50679 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50680 + !(current->role->roletype & GR_ROLE_GOD) &&
50681 + (current->acl != task->acl)) {
50682 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50683 + return 1;
50684 + }
50685 +
50686 + return 0;
50687 +}
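Both ptrace handlers above answer "is the tracer an ancestor of the target?" by walking real_parent from the target until either the tracer is reached or the chain bottoms out at pid 0. A userspace sketch of that walk over a toy process tree; every *_demo identifier is hypothetical.

#include <stdio.h>

struct proc_demo {
	int pid;
	struct proc_demo *real_parent;
};

static int is_ancestor_demo(const struct proc_demo *tracer, const struct proc_demo *task)
{
	const struct proc_demo *tmp = task;

	while (tmp->pid > 0) {
		if (tmp == tracer)
			return 1;
		tmp = tmp->real_parent;
	}
	return 0;
}

int main(void)
{
	struct proc_demo idle  = { 0, NULL };
	struct proc_demo init  = { 1, &idle };
	struct proc_demo shell = { 100, &init };
	struct proc_demo child = { 200, &shell };

	printf("shell ancestor of child: %d\n", is_ancestor_demo(&shell, &child));	/* 1 */
	printf("child ancestor of shell: %d\n", is_ancestor_demo(&child, &shell));	/* 0 */
	return 0;
}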
50688 +
50689 +static int is_writable_mmap(const struct file *filp)
50690 +{
50691 + struct task_struct *task = current;
50692 + struct acl_object_label *obj, *obj2;
50693 +
50694 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50695 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50696 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50697 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50698 + task->role->root_label);
50699 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50700 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50701 + return 1;
50702 + }
50703 + }
50704 + return 0;
50705 +}
50706 +
50707 +int
50708 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50709 +{
50710 + __u32 mode;
50711 +
50712 + if (unlikely(!file || !(prot & PROT_EXEC)))
50713 + return 1;
50714 +
50715 + if (is_writable_mmap(file))
50716 + return 0;
50717 +
50718 + mode =
50719 + gr_search_file(file->f_path.dentry,
50720 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50721 + file->f_path.mnt);
50722 +
50723 + if (!gr_tpe_allow(file))
50724 + return 0;
50725 +
50726 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50727 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50728 + return 0;
50729 + } else if (unlikely(!(mode & GR_EXEC))) {
50730 + return 0;
50731 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50732 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50733 + return 1;
50734 + }
50735 +
50736 + return 1;
50737 +}
50738 +
50739 +int
50740 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50741 +{
50742 + __u32 mode;
50743 +
50744 + if (unlikely(!file || !(prot & PROT_EXEC)))
50745 + return 1;
50746 +
50747 + if (is_writable_mmap(file))
50748 + return 0;
50749 +
50750 + mode =
50751 + gr_search_file(file->f_path.dentry,
50752 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50753 + file->f_path.mnt);
50754 +
50755 + if (!gr_tpe_allow(file))
50756 + return 0;
50757 +
50758 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50759 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50760 + return 0;
50761 + } else if (unlikely(!(mode & GR_EXEC))) {
50762 + return 0;
50763 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50764 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50765 + return 1;
50766 + }
50767 +
50768 + return 1;
50769 +}
50770 +
50771 +void
50772 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50773 +{
50774 + unsigned long runtime;
50775 + unsigned long cputime;
50776 + unsigned int wday, cday;
50777 + __u8 whr, chr;
50778 + __u8 wmin, cmin;
50779 + __u8 wsec, csec;
50780 + struct timespec timeval;
50781 +
50782 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50783 + !(task->acl->mode & GR_PROCACCT)))
50784 + return;
50785 +
50786 + do_posix_clock_monotonic_gettime(&timeval);
50787 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50788 + wday = runtime / (3600 * 24);
50789 + runtime -= wday * (3600 * 24);
50790 + whr = runtime / 3600;
50791 + runtime -= whr * 3600;
50792 + wmin = runtime / 60;
50793 + runtime -= wmin * 60;
50794 + wsec = runtime;
50795 +
50796 + cputime = (task->utime + task->stime) / HZ;
50797 + cday = cputime / (3600 * 24);
50798 + cputime -= cday * (3600 * 24);
50799 + chr = cputime / 3600;
50800 + cputime -= chr * 3600;
50801 + cmin = cputime / 60;
50802 + cputime -= cmin * 60;
50803 + csec = cputime;
50804 +
50805 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50806 +
50807 + return;
50808 +}
50809 +
50810 +void gr_set_kernel_label(struct task_struct *task)
50811 +{
50812 + if (gr_status & GR_READY) {
50813 + task->role = kernel_role;
50814 + task->acl = kernel_role->root_label;
50815 + }
50816 + return;
50817 +}
50818 +
50819 +#ifdef CONFIG_TASKSTATS
50820 +int gr_is_taskstats_denied(int pid)
50821 +{
50822 + struct task_struct *task;
50823 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50824 + const struct cred *cred;
50825 +#endif
50826 + int ret = 0;
50827 +
50828 + /* restrict taskstats viewing to un-chrooted root users
50829 + who have the 'view' subject flag if the RBAC system is enabled
50830 + */
50831 +
50832 + rcu_read_lock();
50833 + read_lock(&tasklist_lock);
50834 + task = find_task_by_vpid(pid);
50835 + if (task) {
50836 +#ifdef CONFIG_GRKERNSEC_CHROOT
50837 + if (proc_is_chrooted(task))
50838 + ret = -EACCES;
50839 +#endif
50840 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50841 + cred = __task_cred(task);
50842 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50843 + if (cred->uid != 0)
50844 + ret = -EACCES;
50845 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50846 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50847 + ret = -EACCES;
50848 +#endif
50849 +#endif
50850 + if (gr_status & GR_READY) {
50851 + if (!(task->acl->mode & GR_VIEW))
50852 + ret = -EACCES;
50853 + }
50854 + } else
50855 + ret = -ENOENT;
50856 +
50857 + read_unlock(&tasklist_lock);
50858 + rcu_read_unlock();
50859 +
50860 + return ret;
50861 +}
50862 +#endif
50863 +
50864 +/* AUXV entries are filled via a descendant of search_binary_handler
50865 + after we've already applied the subject for the target
50866 +*/
50867 +int gr_acl_enable_at_secure(void)
50868 +{
50869 + if (unlikely(!(gr_status & GR_READY)))
50870 + return 0;
50871 +
50872 + if (current->acl->mode & GR_ATSECURE)
50873 + return 1;
50874 +
50875 + return 0;
50876 +}
50877 +
50878 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50879 +{
50880 + struct task_struct *task = current;
50881 + struct dentry *dentry = file->f_path.dentry;
50882 + struct vfsmount *mnt = file->f_path.mnt;
50883 + struct acl_object_label *obj, *tmp;
50884 + struct acl_subject_label *subj;
50885 + unsigned int bufsize;
50886 + int is_not_root;
50887 + char *path;
50888 + dev_t dev = __get_dev(dentry);
50889 +
50890 + if (unlikely(!(gr_status & GR_READY)))
50891 + return 1;
50892 +
50893 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50894 + return 1;
50895 +
50896 + /* ignore Eric Biederman */
50897 + if (IS_PRIVATE(dentry->d_inode))
50898 + return 1;
50899 +
50900 + subj = task->acl;
50901 + do {
50902 + obj = lookup_acl_obj_label(ino, dev, subj);
50903 + if (obj != NULL)
50904 + return (obj->mode & GR_FIND) ? 1 : 0;
50905 + } while ((subj = subj->parent_subject));
50906 +
50907 + /* this is purely an optimization since we're looking for an object
50908 +	   for the directory we're doing a readdir on.
50909 + if it's possible for any globbed object to match the entry we're
50910 + filling into the directory, then the object we find here will be
50911 + an anchor point with attached globbed objects
50912 + */
50913 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50914 + if (obj->globbed == NULL)
50915 + return (obj->mode & GR_FIND) ? 1 : 0;
50916 +
50917 + is_not_root = ((obj->filename[0] == '/') &&
50918 + (obj->filename[1] == '\0')) ? 0 : 1;
50919 + bufsize = PAGE_SIZE - namelen - is_not_root;
50920 +
50921 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
50922 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50923 + return 1;
50924 +
50925 + preempt_disable();
50926 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50927 + bufsize);
50928 +
50929 + bufsize = strlen(path);
50930 +
50931 + /* if base is "/", don't append an additional slash */
50932 + if (is_not_root)
50933 + *(path + bufsize) = '/';
50934 + memcpy(path + bufsize + is_not_root, name, namelen);
50935 + *(path + bufsize + namelen + is_not_root) = '\0';
50936 +
50937 + tmp = obj->globbed;
50938 + while (tmp) {
50939 + if (!glob_match(tmp->filename, path)) {
50940 + preempt_enable();
50941 + return (tmp->mode & GR_FIND) ? 1 : 0;
50942 + }
50943 + tmp = tmp->next;
50944 + }
50945 + preempt_enable();
50946 + return (obj->mode & GR_FIND) ? 1 : 0;
50947 +}
50948 +
50949 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50950 +EXPORT_SYMBOL(gr_acl_is_enabled);
50951 +#endif
50952 +EXPORT_SYMBOL(gr_learn_resource);
50953 +EXPORT_SYMBOL(gr_set_kernel_label);
50954 +#ifdef CONFIG_SECURITY
50955 +EXPORT_SYMBOL(gr_check_user_change);
50956 +EXPORT_SYMBOL(gr_check_group_change);
50957 +#endif
50958 +
50959 diff -urNp linux-3.0.7/grsecurity/gracl_cap.c linux-3.0.7/grsecurity/gracl_cap.c
50960 --- linux-3.0.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50961 +++ linux-3.0.7/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
50962 @@ -0,0 +1,101 @@
50963 +#include <linux/kernel.h>
50964 +#include <linux/module.h>
50965 +#include <linux/sched.h>
50966 +#include <linux/gracl.h>
50967 +#include <linux/grsecurity.h>
50968 +#include <linux/grinternal.h>
50969 +
50970 +extern const char *captab_log[];
50971 +extern int captab_log_entries;
50972 +
50973 +int
50974 +gr_acl_is_capable(const int cap)
50975 +{
50976 + struct task_struct *task = current;
50977 + const struct cred *cred = current_cred();
50978 + struct acl_subject_label *curracl;
50979 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50980 + kernel_cap_t cap_audit = __cap_empty_set;
50981 +
50982 + if (!gr_acl_is_enabled())
50983 + return 1;
50984 +
50985 + curracl = task->acl;
50986 +
50987 + cap_drop = curracl->cap_lower;
50988 + cap_mask = curracl->cap_mask;
50989 + cap_audit = curracl->cap_invert_audit;
50990 +
50991 + while ((curracl = curracl->parent_subject)) {
50992 + /* if the cap isn't specified in the current computed mask but is specified in the
50993 + current level subject, and is lowered in the current level subject, then add
50994 +		   it to the set of dropped capabilities;
50995 + otherwise, add the current level subject's mask to the current computed mask
50996 + */
50997 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50998 + cap_raise(cap_mask, cap);
50999 + if (cap_raised(curracl->cap_lower, cap))
51000 + cap_raise(cap_drop, cap);
51001 + if (cap_raised(curracl->cap_invert_audit, cap))
51002 + cap_raise(cap_audit, cap);
51003 + }
51004 + }
51005 +
51006 + if (!cap_raised(cap_drop, cap)) {
51007 + if (cap_raised(cap_audit, cap))
51008 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51009 + return 1;
51010 + }
51011 +
51012 + curracl = task->acl;
51013 +
51014 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51015 + && cap_raised(cred->cap_effective, cap)) {
51016 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51017 + task->role->roletype, cred->uid,
51018 + cred->gid, task->exec_file ?
51019 + gr_to_filename(task->exec_file->f_path.dentry,
51020 + task->exec_file->f_path.mnt) : curracl->filename,
51021 + curracl->filename, 0UL,
51022 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51023 + return 1;
51024 + }
51025 +
51026 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51027 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51028 + return 0;
51029 +}
51030 +
51031 +int
51032 +gr_acl_is_capable_nolog(const int cap)
51033 +{
51034 + struct acl_subject_label *curracl;
51035 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51036 +
51037 + if (!gr_acl_is_enabled())
51038 + return 1;
51039 +
51040 + curracl = current->acl;
51041 +
51042 + cap_drop = curracl->cap_lower;
51043 + cap_mask = curracl->cap_mask;
51044 +
51045 + while ((curracl = curracl->parent_subject)) {
51046 + /* if the cap isn't specified in the current computed mask but is specified in the
51047 + current level subject, and is lowered in the current level subject, then add
51048 +		   it to the set of dropped capabilities;
51049 + otherwise, add the current level subject's mask to the current computed mask
51050 + */
51051 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51052 + cap_raise(cap_mask, cap);
51053 + if (cap_raised(curracl->cap_lower, cap))
51054 + cap_raise(cap_drop, cap);
51055 + }
51056 + }
51057 +
51058 + if (!cap_raised(cap_drop, cap))
51059 + return 1;
51060 +
51061 + return 0;
51062 +}
51063 +
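The two checks in gracl_cap.c above compute an effective capability decision by walking parent_subject: a parent may only decide a capability bit that no more specific subject has already claimed in its cap_mask, and a claimed-and-lowered bit lands in the drop set. A toy model of that inheritance using plain unsigned long bitmasks in place of kernel_cap_t; every *_demo name is hypothetical.

#include <stdio.h>

struct subject_demo {
	unsigned long cap_mask;		/* caps this subject has an opinion about */
	unsigned long cap_lower;	/* caps this subject drops */
	struct subject_demo *parent;
};

static int capable_demo(const struct subject_demo *subj, int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = subj->cap_mask;
	unsigned long drop = subj->cap_lower;

	while ((subj = subj->parent)) {
		/* inherit a parent's decision only for bits the child left unspecified */
		if (!(mask & bit) && (subj->cap_mask & bit)) {
			mask |= bit;
			if (subj->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subject_demo root  = { .cap_mask = 1UL << 1, .cap_lower = 1UL << 1, .parent = NULL };
	struct subject_demo child = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	printf("cap 1: %s\n", capable_demo(&child, 1) ? "allowed" : "dropped");	/* dropped */
	printf("cap 2: %s\n", capable_demo(&child, 2) ? "allowed" : "dropped");	/* allowed */
	return 0;
}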
51064 diff -urNp linux-3.0.7/grsecurity/gracl_fs.c linux-3.0.7/grsecurity/gracl_fs.c
51065 --- linux-3.0.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51066 +++ linux-3.0.7/grsecurity/gracl_fs.c 2011-10-17 01:22:26.000000000 -0400
51067 @@ -0,0 +1,431 @@
51068 +#include <linux/kernel.h>
51069 +#include <linux/sched.h>
51070 +#include <linux/types.h>
51071 +#include <linux/fs.h>
51072 +#include <linux/file.h>
51073 +#include <linux/stat.h>
51074 +#include <linux/grsecurity.h>
51075 +#include <linux/grinternal.h>
51076 +#include <linux/gracl.h>
51077 +
51078 +__u32
51079 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51080 + const struct vfsmount * mnt)
51081 +{
51082 + __u32 mode;
51083 +
51084 + if (unlikely(!dentry->d_inode))
51085 + return GR_FIND;
51086 +
51087 + mode =
51088 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51089 +
51090 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51091 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51092 + return mode;
51093 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51094 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51095 + return 0;
51096 + } else if (unlikely(!(mode & GR_FIND)))
51097 + return 0;
51098 +
51099 + return GR_FIND;
51100 +}
51101 +
51102 +__u32
51103 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51104 + const int fmode)
51105 +{
51106 + __u32 reqmode = GR_FIND;
51107 + __u32 mode;
51108 +
51109 + if (unlikely(!dentry->d_inode))
51110 + return reqmode;
51111 +
51112 + if (unlikely(fmode & O_APPEND))
51113 + reqmode |= GR_APPEND;
51114 + else if (unlikely(fmode & FMODE_WRITE))
51115 + reqmode |= GR_WRITE;
51116 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51117 + reqmode |= GR_READ;
51118 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
51119 + reqmode &= ~GR_READ;
51120 + mode =
51121 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51122 + mnt);
51123 +
51124 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51125 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51126 + reqmode & GR_READ ? " reading" : "",
51127 + reqmode & GR_WRITE ? " writing" : reqmode &
51128 + GR_APPEND ? " appending" : "");
51129 + return reqmode;
51130 + } else
51131 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51132 + {
51133 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51134 + reqmode & GR_READ ? " reading" : "",
51135 + reqmode & GR_WRITE ? " writing" : reqmode &
51136 + GR_APPEND ? " appending" : "");
51137 + return 0;
51138 + } else if (unlikely((mode & reqmode) != reqmode))
51139 + return 0;
51140 +
51141 + return reqmode;
51142 +}
51143 +
51144 +__u32
51145 +gr_acl_handle_creat(const struct dentry * dentry,
51146 + const struct dentry * p_dentry,
51147 + const struct vfsmount * p_mnt, const int fmode,
51148 + const int imode)
51149 +{
51150 + __u32 reqmode = GR_WRITE | GR_CREATE;
51151 + __u32 mode;
51152 +
51153 + if (unlikely(fmode & O_APPEND))
51154 + reqmode |= GR_APPEND;
51155 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51156 + reqmode |= GR_READ;
51157 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51158 + reqmode |= GR_SETID;
51159 +
51160 + mode =
51161 + gr_check_create(dentry, p_dentry, p_mnt,
51162 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51163 +
51164 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51165 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51166 + reqmode & GR_READ ? " reading" : "",
51167 + reqmode & GR_WRITE ? " writing" : reqmode &
51168 + GR_APPEND ? " appending" : "");
51169 + return reqmode;
51170 + } else
51171 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51172 + {
51173 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51174 + reqmode & GR_READ ? " reading" : "",
51175 + reqmode & GR_WRITE ? " writing" : reqmode &
51176 + GR_APPEND ? " appending" : "");
51177 + return 0;
51178 + } else if (unlikely((mode & reqmode) != reqmode))
51179 + return 0;
51180 +
51181 + return reqmode;
51182 +}
51183 +
51184 +__u32
51185 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51186 + const int fmode)
51187 +{
51188 + __u32 mode, reqmode = GR_FIND;
51189 +
51190 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51191 + reqmode |= GR_EXEC;
51192 + if (fmode & S_IWOTH)
51193 + reqmode |= GR_WRITE;
51194 + if (fmode & S_IROTH)
51195 + reqmode |= GR_READ;
51196 +
51197 + mode =
51198 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51199 + mnt);
51200 +
51201 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51202 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51203 + reqmode & GR_READ ? " reading" : "",
51204 + reqmode & GR_WRITE ? " writing" : "",
51205 + reqmode & GR_EXEC ? " executing" : "");
51206 + return reqmode;
51207 + } else
51208 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51209 + {
51210 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51211 + reqmode & GR_READ ? " reading" : "",
51212 + reqmode & GR_WRITE ? " writing" : "",
51213 + reqmode & GR_EXEC ? " executing" : "");
51214 + return 0;
51215 + } else if (unlikely((mode & reqmode) != reqmode))
51216 + return 0;
51217 +
51218 + return reqmode;
51219 +}
51220 +
51221 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51222 +{
51223 + __u32 mode;
51224 +
51225 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51226 +
51227 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51228 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51229 + return mode;
51230 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51231 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51232 + return 0;
51233 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51234 + return 0;
51235 +
51236 + return (reqmode);
51237 +}
51238 +
51239 +__u32
51240 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51241 +{
51242 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51243 +}
51244 +
51245 +__u32
51246 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51247 +{
51248 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51249 +}
51250 +
51251 +__u32
51252 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51253 +{
51254 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51255 +}
51256 +
51257 +__u32
51258 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51259 +{
51260 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51261 +}
51262 +
51263 +__u32
51264 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51265 + mode_t mode)
51266 +{
51267 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51268 + return 1;
51269 +
51270 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51271 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51272 + GR_FCHMOD_ACL_MSG);
51273 + } else {
51274 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51275 + }
51276 +}
51277 +
51278 +__u32
51279 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51280 + mode_t mode)
51281 +{
51282 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51283 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51284 + GR_CHMOD_ACL_MSG);
51285 + } else {
51286 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51287 + }
51288 +}
51289 +
51290 +__u32
51291 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51292 +{
51293 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51294 +}
51295 +
51296 +__u32
51297 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51298 +{
51299 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51300 +}
51301 +
51302 +__u32
51303 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51304 +{
51305 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51306 +}
51307 +
51308 +__u32
51309 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51310 +{
51311 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51312 + GR_UNIXCONNECT_ACL_MSG);
51313 +}
51314 +
51315 +/* hardlinks require at minimum create and link permission;
51316 + any additional privilege required is based on the
51317 + privilege of the file being linked to
51318 +*/
51319 +__u32
51320 +gr_acl_handle_link(const struct dentry * new_dentry,
51321 + const struct dentry * parent_dentry,
51322 + const struct vfsmount * parent_mnt,
51323 + const struct dentry * old_dentry,
51324 + const struct vfsmount * old_mnt, const char *to)
51325 +{
51326 + __u32 mode;
51327 + __u32 needmode = GR_CREATE | GR_LINK;
51328 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51329 +
51330 + mode =
51331 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51332 + old_mnt);
51333 +
51334 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51335 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51336 + return mode;
51337 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51338 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51339 + return 0;
51340 + } else if (unlikely((mode & needmode) != needmode))
51341 + return 0;
51342 +
51343 + return 1;
51344 +}
51345 +
51346 +__u32
51347 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51348 + const struct dentry * parent_dentry,
51349 + const struct vfsmount * parent_mnt, const char *from)
51350 +{
51351 + __u32 needmode = GR_WRITE | GR_CREATE;
51352 + __u32 mode;
51353 +
51354 + mode =
51355 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51356 + GR_CREATE | GR_AUDIT_CREATE |
51357 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51358 +
51359 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51360 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51361 + return mode;
51362 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51363 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51364 + return 0;
51365 + } else if (unlikely((mode & needmode) != needmode))
51366 + return 0;
51367 +
51368 + return (GR_WRITE | GR_CREATE);
51369 +}
51370 +
51371 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51372 +{
51373 + __u32 mode;
51374 +
51375 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51376 +
51377 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51378 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51379 + return mode;
51380 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51381 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51382 + return 0;
51383 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51384 + return 0;
51385 +
51386 + return (reqmode);
51387 +}
51388 +
51389 +__u32
51390 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51391 + const struct dentry * parent_dentry,
51392 + const struct vfsmount * parent_mnt,
51393 + const int mode)
51394 +{
51395 + __u32 reqmode = GR_WRITE | GR_CREATE;
51396 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51397 + reqmode |= GR_SETID;
51398 +
51399 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51400 + reqmode, GR_MKNOD_ACL_MSG);
51401 +}
51402 +
51403 +__u32
51404 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51405 + const struct dentry *parent_dentry,
51406 + const struct vfsmount *parent_mnt)
51407 +{
51408 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51409 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51410 +}
51411 +
51412 +#define RENAME_CHECK_SUCCESS(old, new) \
51413 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51414 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51415 +
51416 +int
51417 +gr_acl_handle_rename(struct dentry *new_dentry,
51418 + struct dentry *parent_dentry,
51419 + const struct vfsmount *parent_mnt,
51420 + struct dentry *old_dentry,
51421 + struct inode *old_parent_inode,
51422 + struct vfsmount *old_mnt, const char *newname)
51423 +{
51424 + __u32 comp1, comp2;
51425 + int error = 0;
51426 +
51427 + if (unlikely(!gr_acl_is_enabled()))
51428 + return 0;
51429 +
51430 + if (!new_dentry->d_inode) {
51431 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51432 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51433 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51434 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51435 + GR_DELETE | GR_AUDIT_DELETE |
51436 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51437 + GR_SUPPRESS, old_mnt);
51438 + } else {
51439 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51440 + GR_CREATE | GR_DELETE |
51441 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51442 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51443 + GR_SUPPRESS, parent_mnt);
51444 + comp2 =
51445 + gr_search_file(old_dentry,
51446 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51447 + GR_DELETE | GR_AUDIT_DELETE |
51448 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51449 + }
51450 +
51451 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51452 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51453 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51454 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51455 + && !(comp2 & GR_SUPPRESS)) {
51456 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51457 + error = -EACCES;
51458 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51459 + error = -EACCES;
51460 +
51461 + return error;
51462 +}
51463 +
51464 +void
51465 +gr_acl_handle_exit(void)
51466 +{
51467 + u16 id;
51468 + char *rolename;
51469 + struct file *exec_file;
51470 +
51471 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51472 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51473 + id = current->acl_role_id;
51474 + rolename = current->role->rolename;
51475 + gr_set_acls(1);
51476 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51477 + }
51478 +
51479 + write_lock(&grsec_exec_file_lock);
51480 + exec_file = current->exec_file;
51481 + current->exec_file = NULL;
51482 + write_unlock(&grsec_exec_file_lock);
51483 +
51484 + if (exec_file)
51485 + fput(exec_file);
51486 +}
51487 +
51488 +int
51489 +gr_acl_handle_procpidmem(const struct task_struct *task)
51490 +{
51491 + if (unlikely(!gr_acl_is_enabled()))
51492 + return 0;
51493 +
51494 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51495 + return -EACCES;
51496 +
51497 + return 0;
51498 +}
51499 diff -urNp linux-3.0.7/grsecurity/gracl_ip.c linux-3.0.7/grsecurity/gracl_ip.c
51500 --- linux-3.0.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51501 +++ linux-3.0.7/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
51502 @@ -0,0 +1,381 @@
51503 +#include <linux/kernel.h>
51504 +#include <asm/uaccess.h>
51505 +#include <asm/errno.h>
51506 +#include <net/sock.h>
51507 +#include <linux/file.h>
51508 +#include <linux/fs.h>
51509 +#include <linux/net.h>
51510 +#include <linux/in.h>
51511 +#include <linux/skbuff.h>
51512 +#include <linux/ip.h>
51513 +#include <linux/udp.h>
51514 +#include <linux/types.h>
51515 +#include <linux/sched.h>
51516 +#include <linux/netdevice.h>
51517 +#include <linux/inetdevice.h>
51518 +#include <linux/gracl.h>
51519 +#include <linux/grsecurity.h>
51520 +#include <linux/grinternal.h>
51521 +
51522 +#define GR_BIND 0x01
51523 +#define GR_CONNECT 0x02
51524 +#define GR_INVERT 0x04
51525 +#define GR_BINDOVERRIDE 0x08
51526 +#define GR_CONNECTOVERRIDE 0x10
51527 +#define GR_SOCK_FAMILY 0x20
51528 +
51529 +static const char * gr_protocols[IPPROTO_MAX] = {
51530 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51531 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51532 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51533 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51534 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51535 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51536 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51537 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51538 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51539 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51540 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51541 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51542 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51543 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51544 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51545 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51546 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51547 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51548 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51549 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51550 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51551 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51552 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51553 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51554 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51555 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51556 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51557 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51558 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51559 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51560 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51561 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51562 + };
51563 +
51564 +static const char * gr_socktypes[SOCK_MAX] = {
51565 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51566 + "unknown:7", "unknown:8", "unknown:9", "packet"
51567 + };
51568 +
51569 +static const char * gr_sockfamilies[AF_MAX+1] = {
51570 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51571 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51572 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51573 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
51574 + };
51575 +
51576 +const char *
51577 +gr_proto_to_name(unsigned char proto)
51578 +{
51579 + return gr_protocols[proto];
51580 +}
51581 +
51582 +const char *
51583 +gr_socktype_to_name(unsigned char type)
51584 +{
51585 + return gr_socktypes[type];
51586 +}
51587 +
51588 +const char *
51589 +gr_sockfamily_to_name(unsigned char family)
51590 +{
51591 + return gr_sockfamilies[family];
51592 +}
51593 +
51594 +int
51595 +gr_search_socket(const int domain, const int type, const int protocol)
51596 +{
51597 + struct acl_subject_label *curr;
51598 + const struct cred *cred = current_cred();
51599 +
51600 + if (unlikely(!gr_acl_is_enabled()))
51601 + goto exit;
51602 +
51603 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51604 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51605 + goto exit; // let the kernel handle it
51606 +
51607 + curr = current->acl;
51608 +
51609 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51610 + /* the family is allowed; if this is PF_INET, allow it only if
51611 + the extra sock type/protocol checks pass */
51612 + if (domain == PF_INET)
51613 + goto inet_check;
51614 + goto exit;
51615 + } else {
51616 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51617 + __u32 fakeip = 0;
51618 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51619 + current->role->roletype, cred->uid,
51620 + cred->gid, current->exec_file ?
51621 + gr_to_filename(current->exec_file->f_path.dentry,
51622 + current->exec_file->f_path.mnt) :
51623 + curr->filename, curr->filename,
51624 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51625 + &current->signal->saved_ip);
51626 + goto exit;
51627 + }
51628 + goto exit_fail;
51629 + }
51630 +
51631 +inet_check:
51632 + /* the rest of this checking is for IPv4 only */
51633 + if (!curr->ips)
51634 + goto exit;
51635 +
51636 + if ((curr->ip_type & (1 << type)) &&
51637 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51638 + goto exit;
51639 +
51640 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51641 + /* we don't place acls on raw sockets, and sometimes
51642 + dgram/ip sockets are opened for ioctl and not
51643 + bind/connect, so we'll fake a bind learn log */
51644 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51645 + __u32 fakeip = 0;
51646 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51647 + current->role->roletype, cred->uid,
51648 + cred->gid, current->exec_file ?
51649 + gr_to_filename(current->exec_file->f_path.dentry,
51650 + current->exec_file->f_path.mnt) :
51651 + curr->filename, curr->filename,
51652 + &fakeip, 0, type,
51653 + protocol, GR_CONNECT, &current->signal->saved_ip);
51654 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51655 + __u32 fakeip = 0;
51656 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51657 + current->role->roletype, cred->uid,
51658 + cred->gid, current->exec_file ?
51659 + gr_to_filename(current->exec_file->f_path.dentry,
51660 + current->exec_file->f_path.mnt) :
51661 + curr->filename, curr->filename,
51662 + &fakeip, 0, type,
51663 + protocol, GR_BIND, &current->signal->saved_ip);
51664 + }
51665 + /* we'll log when they use connect or bind */
51666 + goto exit;
51667 + }
51668 +
51669 +exit_fail:
51670 + if (domain == PF_INET)
51671 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51672 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51673 + else
51674 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51675 + gr_socktype_to_name(type), protocol);
51676 +
51677 + return 0;
51678 +exit:
51679 + return 1;
51680 +}
51681 +
51682 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51683 +{
51684 + if ((ip->mode & mode) &&
51685 + (ip_port >= ip->low) &&
51686 + (ip_port <= ip->high) &&
51687 + ((ntohl(ip_addr) & our_netmask) ==
51688 + (ntohl(our_addr) & our_netmask))
51689 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51690 + && (ip->type & (1 << type))) {
51691 + if (ip->mode & GR_INVERT)
51692 + return 2; // specifically denied
51693 + else
51694 + return 1; // allowed
51695 + }
51696 +
51697 + return 0; // not specifically allowed, may continue parsing
51698 +}
51699 +
51700 +static int
51701 +gr_search_connectbind(const int full_mode, struct sock *sk,
51702 + struct sockaddr_in *addr, const int type)
51703 +{
51704 + char iface[IFNAMSIZ] = {0};
51705 + struct acl_subject_label *curr;
51706 + struct acl_ip_label *ip;
51707 + struct inet_sock *isk;
51708 + struct net_device *dev;
51709 + struct in_device *idev;
51710 + unsigned long i;
51711 + int ret;
51712 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51713 + __u32 ip_addr = 0;
51714 + __u32 our_addr;
51715 + __u32 our_netmask;
51716 + char *p;
51717 + __u16 ip_port = 0;
51718 + const struct cred *cred = current_cred();
51719 +
51720 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51721 + return 0;
51722 +
51723 + curr = current->acl;
51724 + isk = inet_sk(sk);
51725 +
51726 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51727 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51728 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51729 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51730 + struct sockaddr_in saddr;
51731 + int err;
51732 +
51733 + saddr.sin_family = AF_INET;
51734 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51735 + saddr.sin_port = isk->inet_sport;
51736 +
51737 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51738 + if (err)
51739 + return err;
51740 +
51741 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51742 + if (err)
51743 + return err;
51744 + }
51745 +
51746 + if (!curr->ips)
51747 + return 0;
51748 +
51749 + ip_addr = addr->sin_addr.s_addr;
51750 + ip_port = ntohs(addr->sin_port);
51751 +
51752 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51753 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51754 + current->role->roletype, cred->uid,
51755 + cred->gid, current->exec_file ?
51756 + gr_to_filename(current->exec_file->f_path.dentry,
51757 + current->exec_file->f_path.mnt) :
51758 + curr->filename, curr->filename,
51759 + &ip_addr, ip_port, type,
51760 + sk->sk_protocol, mode, &current->signal->saved_ip);
51761 + return 0;
51762 + }
51763 +
51764 + for (i = 0; i < curr->ip_num; i++) {
51765 + ip = *(curr->ips + i);
51766 + if (ip->iface != NULL) {
51767 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51768 + p = strchr(iface, ':');
51769 + if (p != NULL)
51770 + *p = '\0';
51771 + dev = dev_get_by_name(sock_net(sk), iface);
51772 + if (dev == NULL)
51773 + continue;
51774 + idev = in_dev_get(dev);
51775 + if (idev == NULL) {
51776 + dev_put(dev);
51777 + continue;
51778 + }
51779 + rcu_read_lock();
51780 + for_ifa(idev) {
51781 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51782 + our_addr = ifa->ifa_address;
51783 + our_netmask = 0xffffffff;
51784 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51785 + if (ret == 1) {
51786 + rcu_read_unlock();
51787 + in_dev_put(idev);
51788 + dev_put(dev);
51789 + return 0;
51790 + } else if (ret == 2) {
51791 + rcu_read_unlock();
51792 + in_dev_put(idev);
51793 + dev_put(dev);
51794 + goto denied;
51795 + }
51796 + }
51797 + } endfor_ifa(idev);
51798 + rcu_read_unlock();
51799 + in_dev_put(idev);
51800 + dev_put(dev);
51801 + } else {
51802 + our_addr = ip->addr;
51803 + our_netmask = ip->netmask;
51804 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51805 + if (ret == 1)
51806 + return 0;
51807 + else if (ret == 2)
51808 + goto denied;
51809 + }
51810 + }
51811 +
51812 +denied:
51813 + if (mode == GR_BIND)
51814 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51815 + else if (mode == GR_CONNECT)
51816 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51817 +
51818 + return -EACCES;
51819 +}
51820 +
51821 +int
51822 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51823 +{
51824 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51825 +}
51826 +
51827 +int
51828 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51829 +{
51830 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51831 +}
51832 +
51833 +int gr_search_listen(struct socket *sock)
51834 +{
51835 + struct sock *sk = sock->sk;
51836 + struct sockaddr_in addr;
51837 +
51838 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51839 + addr.sin_port = inet_sk(sk)->inet_sport;
51840 +
51841 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51842 +}
51843 +
51844 +int gr_search_accept(struct socket *sock)
51845 +{
51846 + struct sock *sk = sock->sk;
51847 + struct sockaddr_in addr;
51848 +
51849 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51850 + addr.sin_port = inet_sk(sk)->inet_sport;
51851 +
51852 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51853 +}
51854 +
51855 +int
51856 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51857 +{
51858 + if (addr)
51859 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51860 + else {
51861 + struct sockaddr_in sin;
51862 + const struct inet_sock *inet = inet_sk(sk);
51863 +
51864 + sin.sin_addr.s_addr = inet->inet_daddr;
51865 + sin.sin_port = inet->inet_dport;
51866 +
51867 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51868 + }
51869 +}
51870 +
51871 +int
51872 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51873 +{
51874 + struct sockaddr_in sin;
51875 +
51876 + if (unlikely(skb->len < sizeof (struct udphdr)))
51877 + return 0; // skip this packet
51878 +
51879 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51880 + sin.sin_port = udp_hdr(skb)->source;
51881 +
51882 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51883 +}
51884 diff -urNp linux-3.0.7/grsecurity/gracl_learn.c linux-3.0.7/grsecurity/gracl_learn.c
51885 --- linux-3.0.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51886 +++ linux-3.0.7/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
51887 @@ -0,0 +1,207 @@
51888 +#include <linux/kernel.h>
51889 +#include <linux/mm.h>
51890 +#include <linux/sched.h>
51891 +#include <linux/poll.h>
51892 +#include <linux/string.h>
51893 +#include <linux/file.h>
51894 +#include <linux/types.h>
51895 +#include <linux/vmalloc.h>
51896 +#include <linux/grinternal.h>
51897 +
51898 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51899 + size_t count, loff_t *ppos);
51900 +extern int gr_acl_is_enabled(void);
51901 +
51902 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51903 +static int gr_learn_attached;
51904 +
51905 +/* use a 512k buffer */
51906 +#define LEARN_BUFFER_SIZE (512 * 1024)
51907 +
51908 +static DEFINE_SPINLOCK(gr_learn_lock);
51909 +static DEFINE_MUTEX(gr_learn_user_mutex);
51910 +
51911 +/* we need to maintain two buffers, so that the kernel context of grlearn
51912 + uses a semaphore around the userspace copying, and the other kernel contexts
51913 + use a spinlock when copying into the buffer, since they cannot sleep
51914 +*/
51915 +static char *learn_buffer;
51916 +static char *learn_buffer_user;
51917 +static int learn_buffer_len;
51918 +static int learn_buffer_user_len;
51919 +
51920 +static ssize_t
51921 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51922 +{
51923 + DECLARE_WAITQUEUE(wait, current);
51924 + ssize_t retval = 0;
51925 +
51926 + add_wait_queue(&learn_wait, &wait);
51927 + set_current_state(TASK_INTERRUPTIBLE);
51928 + do {
51929 + mutex_lock(&gr_learn_user_mutex);
51930 + spin_lock(&gr_learn_lock);
51931 + if (learn_buffer_len)
51932 + break;
51933 + spin_unlock(&gr_learn_lock);
51934 + mutex_unlock(&gr_learn_user_mutex);
51935 + if (file->f_flags & O_NONBLOCK) {
51936 + retval = -EAGAIN;
51937 + goto out;
51938 + }
51939 + if (signal_pending(current)) {
51940 + retval = -ERESTARTSYS;
51941 + goto out;
51942 + }
51943 +
51944 + schedule();
51945 + } while (1);
51946 +
51947 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51948 + learn_buffer_user_len = learn_buffer_len;
51949 + retval = learn_buffer_len;
51950 + learn_buffer_len = 0;
51951 +
51952 + spin_unlock(&gr_learn_lock);
51953 +
51954 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51955 + retval = -EFAULT;
51956 +
51957 + mutex_unlock(&gr_learn_user_mutex);
51958 +out:
51959 + set_current_state(TASK_RUNNING);
51960 + remove_wait_queue(&learn_wait, &wait);
51961 + return retval;
51962 +}
51963 +
51964 +static unsigned int
51965 +poll_learn(struct file * file, poll_table * wait)
51966 +{
51967 + poll_wait(file, &learn_wait, wait);
51968 +
51969 + if (learn_buffer_len)
51970 + return (POLLIN | POLLRDNORM);
51971 +
51972 + return 0;
51973 +}
51974 +
51975 +void
51976 +gr_clear_learn_entries(void)
51977 +{
51978 + char *tmp;
51979 +
51980 + mutex_lock(&gr_learn_user_mutex);
51981 + spin_lock(&gr_learn_lock);
51982 + tmp = learn_buffer;
51983 + learn_buffer = NULL;
51984 + spin_unlock(&gr_learn_lock);
51985 + if (tmp)
51986 + vfree(tmp);
51987 + if (learn_buffer_user != NULL) {
51988 + vfree(learn_buffer_user);
51989 + learn_buffer_user = NULL;
51990 + }
51991 + learn_buffer_len = 0;
51992 + mutex_unlock(&gr_learn_user_mutex);
51993 +
51994 + return;
51995 +}
51996 +
51997 +void
51998 +gr_add_learn_entry(const char *fmt, ...)
51999 +{
52000 + va_list args;
52001 + unsigned int len;
52002 +
52003 + if (!gr_learn_attached)
52004 + return;
52005 +
52006 + spin_lock(&gr_learn_lock);
52007 +
52008 + /* leave a gap at the end so we know when it's "full" but don't have to
52009 + compute the exact length of the string we're trying to append
52010 + */
52011 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52012 + spin_unlock(&gr_learn_lock);
52013 + wake_up_interruptible(&learn_wait);
52014 + return;
52015 + }
52016 + if (learn_buffer == NULL) {
52017 + spin_unlock(&gr_learn_lock);
52018 + return;
52019 + }
52020 +
52021 + va_start(args, fmt);
52022 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52023 + va_end(args);
52024 +
52025 + learn_buffer_len += len + 1;
52026 +
52027 + spin_unlock(&gr_learn_lock);
52028 + wake_up_interruptible(&learn_wait);
52029 +
52030 + return;
52031 +}
52032 +
52033 +static int
52034 +open_learn(struct inode *inode, struct file *file)
52035 +{
52036 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52037 + return -EBUSY;
52038 + if (file->f_mode & FMODE_READ) {
52039 + int retval = 0;
52040 + mutex_lock(&gr_learn_user_mutex);
52041 + if (learn_buffer == NULL)
52042 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52043 + if (learn_buffer_user == NULL)
52044 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52045 + if (learn_buffer == NULL) {
52046 + retval = -ENOMEM;
52047 + goto out_error;
52048 + }
52049 + if (learn_buffer_user == NULL) {
52050 + retval = -ENOMEM;
52051 + goto out_error;
52052 + }
52053 + learn_buffer_len = 0;
52054 + learn_buffer_user_len = 0;
52055 + gr_learn_attached = 1;
52056 +out_error:
52057 + mutex_unlock(&gr_learn_user_mutex);
52058 + return retval;
52059 + }
52060 + return 0;
52061 +}
52062 +
52063 +static int
52064 +close_learn(struct inode *inode, struct file *file)
52065 +{
52066 + if (file->f_mode & FMODE_READ) {
52067 + char *tmp = NULL;
52068 + mutex_lock(&gr_learn_user_mutex);
52069 + spin_lock(&gr_learn_lock);
52070 + tmp = learn_buffer;
52071 + learn_buffer = NULL;
52072 + spin_unlock(&gr_learn_lock);
52073 + if (tmp)
52074 + vfree(tmp);
52075 + if (learn_buffer_user != NULL) {
52076 + vfree(learn_buffer_user);
52077 + learn_buffer_user = NULL;
52078 + }
52079 + learn_buffer_len = 0;
52080 + learn_buffer_user_len = 0;
52081 + gr_learn_attached = 0;
52082 + mutex_unlock(&gr_learn_user_mutex);
52083 + }
52084 +
52085 + return 0;
52086 +}
52087 +
52088 +const struct file_operations grsec_fops = {
52089 + .read = read_learn,
52090 + .write = write_grsec_handler,
52091 + .open = open_learn,
52092 + .release = close_learn,
52093 + .poll = poll_learn,
52094 +};
52095 diff -urNp linux-3.0.7/grsecurity/gracl_res.c linux-3.0.7/grsecurity/gracl_res.c
52096 --- linux-3.0.7/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52097 +++ linux-3.0.7/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
52098 @@ -0,0 +1,68 @@
52099 +#include <linux/kernel.h>
52100 +#include <linux/sched.h>
52101 +#include <linux/gracl.h>
52102 +#include <linux/grinternal.h>
52103 +
52104 +static const char *restab_log[] = {
52105 + [RLIMIT_CPU] = "RLIMIT_CPU",
52106 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52107 + [RLIMIT_DATA] = "RLIMIT_DATA",
52108 + [RLIMIT_STACK] = "RLIMIT_STACK",
52109 + [RLIMIT_CORE] = "RLIMIT_CORE",
52110 + [RLIMIT_RSS] = "RLIMIT_RSS",
52111 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52112 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52113 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52114 + [RLIMIT_AS] = "RLIMIT_AS",
52115 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52116 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52117 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52118 + [RLIMIT_NICE] = "RLIMIT_NICE",
52119 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52120 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52121 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52122 +};
52123 +
52124 +void
52125 +gr_log_resource(const struct task_struct *task,
52126 + const int res, const unsigned long wanted, const int gt)
52127 +{
52128 + const struct cred *cred;
52129 + unsigned long rlim;
52130 +
52131 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52132 + return;
52133 +
52134 + // not yet supported resource
52135 + if (unlikely(!restab_log[res]))
52136 + return;
52137 +
52138 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52139 + rlim = task_rlimit_max(task, res);
52140 + else
52141 + rlim = task_rlimit(task, res);
52142 +
52143 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52144 + return;
52145 +
52146 + rcu_read_lock();
52147 + cred = __task_cred(task);
52148 +
52149 + if (res == RLIMIT_NPROC &&
52150 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52151 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52152 + goto out_rcu_unlock;
52153 + else if (res == RLIMIT_MEMLOCK &&
52154 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52155 + goto out_rcu_unlock;
52156 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52157 + goto out_rcu_unlock;
52158 + rcu_read_unlock();
52159 +
52160 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52161 +
52162 + return;
52163 +out_rcu_unlock:
52164 + rcu_read_unlock();
52165 + return;
52166 +}
52167 diff -urNp linux-3.0.7/grsecurity/gracl_segv.c linux-3.0.7/grsecurity/gracl_segv.c
52168 --- linux-3.0.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52169 +++ linux-3.0.7/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
52170 @@ -0,0 +1,299 @@
52171 +#include <linux/kernel.h>
52172 +#include <linux/mm.h>
52173 +#include <asm/uaccess.h>
52174 +#include <asm/errno.h>
52175 +#include <asm/mman.h>
52176 +#include <net/sock.h>
52177 +#include <linux/file.h>
52178 +#include <linux/fs.h>
52179 +#include <linux/net.h>
52180 +#include <linux/in.h>
52181 +#include <linux/slab.h>
52182 +#include <linux/types.h>
52183 +#include <linux/sched.h>
52184 +#include <linux/timer.h>
52185 +#include <linux/gracl.h>
52186 +#include <linux/grsecurity.h>
52187 +#include <linux/grinternal.h>
52188 +
52189 +static struct crash_uid *uid_set;
52190 +static unsigned short uid_used;
52191 +static DEFINE_SPINLOCK(gr_uid_lock);
52192 +extern rwlock_t gr_inode_lock;
52193 +extern struct acl_subject_label *
52194 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52195 + struct acl_role_label *role);
52196 +
52197 +#ifdef CONFIG_BTRFS_FS
52198 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52199 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52200 +#endif
52201 +
52202 +static inline dev_t __get_dev(const struct dentry *dentry)
52203 +{
52204 +#ifdef CONFIG_BTRFS_FS
52205 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52206 + return get_btrfs_dev_from_inode(dentry->d_inode);
52207 + else
52208 +#endif
52209 + return dentry->d_inode->i_sb->s_dev;
52210 +}
52211 +
52212 +int
52213 +gr_init_uidset(void)
52214 +{
52215 + uid_set =
52216 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52217 + uid_used = 0;
52218 +
52219 + return uid_set ? 1 : 0;
52220 +}
52221 +
52222 +void
52223 +gr_free_uidset(void)
52224 +{
52225 + if (uid_set)
52226 + kfree(uid_set);
52227 +
52228 + return;
52229 +}
52230 +
52231 +int
52232 +gr_find_uid(const uid_t uid)
52233 +{
52234 + struct crash_uid *tmp = uid_set;
52235 + uid_t buid;
52236 + int low = 0, high = uid_used - 1, mid;
52237 +
52238 + while (high >= low) {
52239 + mid = (low + high) >> 1;
52240 + buid = tmp[mid].uid;
52241 + if (buid == uid)
52242 + return mid;
52243 + if (buid > uid)
52244 + high = mid - 1;
52245 + if (buid < uid)
52246 + low = mid + 1;
52247 + }
52248 +
52249 + return -1;
52250 +}
52251 +
52252 +static __inline__ void
52253 +gr_insertsort(void)
52254 +{
52255 + unsigned short i, j;
52256 + struct crash_uid index;
52257 +
52258 + for (i = 1; i < uid_used; i++) {
52259 + index = uid_set[i];
52260 + j = i;
52261 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52262 + uid_set[j] = uid_set[j - 1];
52263 + j--;
52264 + }
52265 + uid_set[j] = index;
52266 + }
52267 +
52268 + return;
52269 +}
52270 +
52271 +static __inline__ void
52272 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52273 +{
52274 + int loc;
52275 +
52276 + if (uid_used == GR_UIDTABLE_MAX)
52277 + return;
52278 +
52279 + loc = gr_find_uid(uid);
52280 +
52281 + if (loc >= 0) {
52282 + uid_set[loc].expires = expires;
52283 + return;
52284 + }
52285 +
52286 + uid_set[uid_used].uid = uid;
52287 + uid_set[uid_used].expires = expires;
52288 + uid_used++;
52289 +
52290 + gr_insertsort();
52291 +
52292 + return;
52293 +}
52294 +
52295 +void
52296 +gr_remove_uid(const unsigned short loc)
52297 +{
52298 + unsigned short i;
52299 +
52300 + for (i = loc + 1; i < uid_used; i++)
52301 + uid_set[i - 1] = uid_set[i];
52302 +
52303 + uid_used--;
52304 +
52305 + return;
52306 +}
52307 +
52308 +int
52309 +gr_check_crash_uid(const uid_t uid)
52310 +{
52311 + int loc;
52312 + int ret = 0;
52313 +
52314 + if (unlikely(!gr_acl_is_enabled()))
52315 + return 0;
52316 +
52317 + spin_lock(&gr_uid_lock);
52318 + loc = gr_find_uid(uid);
52319 +
52320 + if (loc < 0)
52321 + goto out_unlock;
52322 +
52323 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52324 + gr_remove_uid(loc);
52325 + else
52326 + ret = 1;
52327 +
52328 +out_unlock:
52329 + spin_unlock(&gr_uid_lock);
52330 + return ret;
52331 +}
52332 +
52333 +static __inline__ int
52334 +proc_is_setxid(const struct cred *cred)
52335 +{
52336 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52337 + cred->uid != cred->fsuid)
52338 + return 1;
52339 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52340 + cred->gid != cred->fsgid)
52341 + return 1;
52342 +
52343 + return 0;
52344 +}
52345 +
52346 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52347 +
52348 +void
52349 +gr_handle_crash(struct task_struct *task, const int sig)
52350 +{
52351 + struct acl_subject_label *curr;
52352 + struct acl_subject_label *curr2;
52353 + struct task_struct *tsk, *tsk2;
52354 + const struct cred *cred;
52355 + const struct cred *cred2;
52356 +
52357 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52358 + return;
52359 +
52360 + if (unlikely(!gr_acl_is_enabled()))
52361 + return;
52362 +
52363 + curr = task->acl;
52364 +
52365 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52366 + return;
52367 +
52368 + if (time_before_eq(curr->expires, get_seconds())) {
52369 + curr->expires = 0;
52370 + curr->crashes = 0;
52371 + }
52372 +
52373 + curr->crashes++;
52374 +
52375 + if (!curr->expires)
52376 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52377 +
52378 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52379 + time_after(curr->expires, get_seconds())) {
52380 + rcu_read_lock();
52381 + cred = __task_cred(task);
52382 + if (cred->uid && proc_is_setxid(cred)) {
52383 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52384 + spin_lock(&gr_uid_lock);
52385 + gr_insert_uid(cred->uid, curr->expires);
52386 + spin_unlock(&gr_uid_lock);
52387 + curr->expires = 0;
52388 + curr->crashes = 0;
52389 + read_lock(&tasklist_lock);
52390 + do_each_thread(tsk2, tsk) {
52391 + cred2 = __task_cred(tsk);
52392 + if (tsk != task && cred2->uid == cred->uid)
52393 + gr_fake_force_sig(SIGKILL, tsk);
52394 + } while_each_thread(tsk2, tsk);
52395 + read_unlock(&tasklist_lock);
52396 + } else {
52397 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52398 + read_lock(&tasklist_lock);
52399 + do_each_thread(tsk2, tsk) {
52400 + if (likely(tsk != task)) {
52401 + curr2 = tsk->acl;
52402 +
52403 + if (curr2->device == curr->device &&
52404 + curr2->inode == curr->inode)
52405 + gr_fake_force_sig(SIGKILL, tsk);
52406 + }
52407 + } while_each_thread(tsk2, tsk);
52408 + read_unlock(&tasklist_lock);
52409 + }
52410 + rcu_read_unlock();
52411 + }
52412 +
52413 + return;
52414 +}
52415 +
52416 +int
52417 +gr_check_crash_exec(const struct file *filp)
52418 +{
52419 + struct acl_subject_label *curr;
52420 +
52421 + if (unlikely(!gr_acl_is_enabled()))
52422 + return 0;
52423 +
52424 + read_lock(&gr_inode_lock);
52425 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52426 + __get_dev(filp->f_path.dentry),
52427 + current->role);
52428 + read_unlock(&gr_inode_lock);
52429 +
52430 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52431 + (!curr->crashes && !curr->expires))
52432 + return 0;
52433 +
52434 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52435 + time_after(curr->expires, get_seconds()))
52436 + return 1;
52437 + else if (time_before_eq(curr->expires, get_seconds())) {
52438 + curr->crashes = 0;
52439 + curr->expires = 0;
52440 + }
52441 +
52442 + return 0;
52443 +}
52444 +
52445 +void
52446 +gr_handle_alertkill(struct task_struct *task)
52447 +{
52448 + struct acl_subject_label *curracl;
52449 + __u32 curr_ip;
52450 + struct task_struct *p, *p2;
52451 +
52452 + if (unlikely(!gr_acl_is_enabled()))
52453 + return;
52454 +
52455 + curracl = task->acl;
52456 + curr_ip = task->signal->curr_ip;
52457 +
52458 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52459 + read_lock(&tasklist_lock);
52460 + do_each_thread(p2, p) {
52461 + if (p->signal->curr_ip == curr_ip)
52462 + gr_fake_force_sig(SIGKILL, p);
52463 + } while_each_thread(p2, p);
52464 + read_unlock(&tasklist_lock);
52465 + } else if (curracl->mode & GR_KILLPROC)
52466 + gr_fake_force_sig(SIGKILL, task);
52467 +
52468 + return;
52469 +}
52470 diff -urNp linux-3.0.7/grsecurity/gracl_shm.c linux-3.0.7/grsecurity/gracl_shm.c
52471 --- linux-3.0.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52472 +++ linux-3.0.7/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
52473 @@ -0,0 +1,40 @@
52474 +#include <linux/kernel.h>
52475 +#include <linux/mm.h>
52476 +#include <linux/sched.h>
52477 +#include <linux/file.h>
52478 +#include <linux/ipc.h>
52479 +#include <linux/gracl.h>
52480 +#include <linux/grsecurity.h>
52481 +#include <linux/grinternal.h>
52482 +
52483 +int
52484 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52485 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52486 +{
52487 + struct task_struct *task;
52488 +
52489 + if (!gr_acl_is_enabled())
52490 + return 1;
52491 +
52492 + rcu_read_lock();
52493 + read_lock(&tasklist_lock);
52494 +
52495 + task = find_task_by_vpid(shm_cprid);
52496 +
52497 + if (unlikely(!task))
52498 + task = find_task_by_vpid(shm_lapid);
52499 +
52500 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52501 + (task->pid == shm_lapid)) &&
52502 + (task->acl->mode & GR_PROTSHM) &&
52503 + (task->acl != current->acl))) {
52504 + read_unlock(&tasklist_lock);
52505 + rcu_read_unlock();
52506 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52507 + return 0;
52508 + }
52509 + read_unlock(&tasklist_lock);
52510 + rcu_read_unlock();
52511 +
52512 + return 1;
52513 +}
52514 diff -urNp linux-3.0.7/grsecurity/grsec_chdir.c linux-3.0.7/grsecurity/grsec_chdir.c
52515 --- linux-3.0.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52516 +++ linux-3.0.7/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
52517 @@ -0,0 +1,19 @@
52518 +#include <linux/kernel.h>
52519 +#include <linux/sched.h>
52520 +#include <linux/fs.h>
52521 +#include <linux/file.h>
52522 +#include <linux/grsecurity.h>
52523 +#include <linux/grinternal.h>
52524 +
52525 +void
52526 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52527 +{
52528 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52529 + if ((grsec_enable_chdir && grsec_enable_group &&
52530 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52531 + !grsec_enable_group)) {
52532 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52533 + }
52534 +#endif
52535 + return;
52536 +}
52537 diff -urNp linux-3.0.7/grsecurity/grsec_chroot.c linux-3.0.7/grsecurity/grsec_chroot.c
52538 --- linux-3.0.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52539 +++ linux-3.0.7/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
52540 @@ -0,0 +1,351 @@
52541 +#include <linux/kernel.h>
52542 +#include <linux/module.h>
52543 +#include <linux/sched.h>
52544 +#include <linux/file.h>
52545 +#include <linux/fs.h>
52546 +#include <linux/mount.h>
52547 +#include <linux/types.h>
52548 +#include <linux/pid_namespace.h>
52549 +#include <linux/grsecurity.h>
52550 +#include <linux/grinternal.h>
52551 +
52552 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52553 +{
52554 +#ifdef CONFIG_GRKERNSEC
52555 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52556 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52557 + task->gr_is_chrooted = 1;
52558 + else
52559 + task->gr_is_chrooted = 0;
52560 +
52561 + task->gr_chroot_dentry = path->dentry;
52562 +#endif
52563 + return;
52564 +}
52565 +
52566 +void gr_clear_chroot_entries(struct task_struct *task)
52567 +{
52568 +#ifdef CONFIG_GRKERNSEC
52569 + task->gr_is_chrooted = 0;
52570 + task->gr_chroot_dentry = NULL;
52571 +#endif
52572 + return;
52573 +}
52574 +
52575 +int
52576 +gr_handle_chroot_unix(const pid_t pid)
52577 +{
52578 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52579 + struct task_struct *p;
52580 +
52581 + if (unlikely(!grsec_enable_chroot_unix))
52582 + return 1;
52583 +
52584 + if (likely(!proc_is_chrooted(current)))
52585 + return 1;
52586 +
52587 + rcu_read_lock();
52588 + read_lock(&tasklist_lock);
52589 + p = find_task_by_vpid_unrestricted(pid);
52590 + if (unlikely(p && !have_same_root(current, p))) {
52591 + read_unlock(&tasklist_lock);
52592 + rcu_read_unlock();
52593 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52594 + return 0;
52595 + }
52596 + read_unlock(&tasklist_lock);
52597 + rcu_read_unlock();
52598 +#endif
52599 + return 1;
52600 +}
52601 +
52602 +int
52603 +gr_handle_chroot_nice(void)
52604 +{
52605 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52606 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52607 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52608 + return -EPERM;
52609 + }
52610 +#endif
52611 + return 0;
52612 +}
52613 +
52614 +int
52615 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52616 +{
52617 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52618 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52619 + && proc_is_chrooted(current)) {
52620 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52621 + return -EACCES;
52622 + }
52623 +#endif
52624 + return 0;
52625 +}
52626 +
52627 +int
52628 +gr_handle_chroot_rawio(const struct inode *inode)
52629 +{
52630 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52631 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52632 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52633 + return 1;
52634 +#endif
52635 + return 0;
52636 +}
52637 +
52638 +int
52639 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52640 +{
52641 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52642 + struct task_struct *p;
52643 + int ret = 0;
52644 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52645 + return ret;
52646 +
52647 + read_lock(&tasklist_lock);
52648 + do_each_pid_task(pid, type, p) {
52649 + if (!have_same_root(current, p)) {
52650 + ret = 1;
52651 + goto out;
52652 + }
52653 + } while_each_pid_task(pid, type, p);
52654 +out:
52655 + read_unlock(&tasklist_lock);
52656 + return ret;
52657 +#endif
52658 + return 0;
52659 +}
52660 +
52661 +int
52662 +gr_pid_is_chrooted(struct task_struct *p)
52663 +{
52664 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52665 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52666 + return 0;
52667 +
52668 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52669 + !have_same_root(current, p)) {
52670 + return 1;
52671 + }
52672 +#endif
52673 + return 0;
52674 +}
52675 +
52676 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52677 +
52678 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52679 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52680 +{
52681 + struct path path, currentroot;
52682 + int ret = 0;
52683 +
52684 + path.dentry = (struct dentry *)u_dentry;
52685 + path.mnt = (struct vfsmount *)u_mnt;
52686 + get_fs_root(current->fs, &currentroot);
52687 + if (path_is_under(&path, &currentroot))
52688 + ret = 1;
52689 + path_put(&currentroot);
52690 +
52691 + return ret;
52692 +}
52693 +#endif
52694 +
52695 +int
52696 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52697 +{
52698 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52699 + if (!grsec_enable_chroot_fchdir)
52700 + return 1;
52701 +
52702 + if (!proc_is_chrooted(current))
52703 + return 1;
52704 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52705 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52706 + return 0;
52707 + }
52708 +#endif
52709 + return 1;
52710 +}
52711 +
52712 +int
52713 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52714 + const time_t shm_createtime)
52715 +{
52716 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52717 + struct task_struct *p;
52718 + time_t starttime;
52719 +
52720 + if (unlikely(!grsec_enable_chroot_shmat))
52721 + return 1;
52722 +
52723 + if (likely(!proc_is_chrooted(current)))
52724 + return 1;
52725 +
52726 + rcu_read_lock();
52727 + read_lock(&tasklist_lock);
52728 +
52729 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52730 + starttime = p->start_time.tv_sec;
52731 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52732 + if (have_same_root(current, p)) {
52733 + goto allow;
52734 + } else {
52735 + read_unlock(&tasklist_lock);
52736 + rcu_read_unlock();
52737 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52738 + return 0;
52739 + }
52740 + }
52741 + /* creator exited, pid reuse, fall through to next check */
52742 + }
52743 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52744 + if (unlikely(!have_same_root(current, p))) {
52745 + read_unlock(&tasklist_lock);
52746 + rcu_read_unlock();
52747 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52748 + return 0;
52749 + }
52750 + }
52751 +
52752 +allow:
52753 + read_unlock(&tasklist_lock);
52754 + rcu_read_unlock();
52755 +#endif
52756 + return 1;
52757 +}
52758 +
52759 +void
52760 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52761 +{
52762 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52763 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52764 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52765 +#endif
52766 + return;
52767 +}
52768 +
52769 +int
52770 +gr_handle_chroot_mknod(const struct dentry *dentry,
52771 + const struct vfsmount *mnt, const int mode)
52772 +{
52773 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52774 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52775 + proc_is_chrooted(current)) {
52776 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52777 + return -EPERM;
52778 + }
52779 +#endif
52780 + return 0;
52781 +}
52782 +
52783 +int
52784 +gr_handle_chroot_mount(const struct dentry *dentry,
52785 + const struct vfsmount *mnt, const char *dev_name)
52786 +{
52787 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52788 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52789 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
52790 + return -EPERM;
52791 + }
52792 +#endif
52793 + return 0;
52794 +}
52795 +
52796 +int
52797 +gr_handle_chroot_pivot(void)
52798 +{
52799 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52800 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52801 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52802 + return -EPERM;
52803 + }
52804 +#endif
52805 + return 0;
52806 +}
52807 +
52808 +int
52809 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52810 +{
52811 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52812 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52813 + !gr_is_outside_chroot(dentry, mnt)) {
52814 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52815 + return -EPERM;
52816 + }
52817 +#endif
52818 + return 0;
52819 +}
52820 +
52821 +extern const char *captab_log[];
52822 +extern int captab_log_entries;
52823 +
52824 +int
52825 +gr_chroot_is_capable(const int cap)
52826 +{
52827 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52828 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52829 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52830 + if (cap_raised(chroot_caps, cap)) {
52831 + const struct cred *creds = current_cred();
52832 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
52833 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
52834 + }
52835 + return 0;
52836 + }
52837 + }
52838 +#endif
52839 + return 1;
52840 +}
52841 +
52842 +int
52843 +gr_chroot_is_capable_nolog(const int cap)
52844 +{
52845 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52846 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52847 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52848 + if (cap_raised(chroot_caps, cap)) {
52849 + return 0;
52850 + }
52851 + }
52852 +#endif
52853 + return 1;
52854 +}
52855 +
52856 +int
52857 +gr_handle_chroot_sysctl(const int op)
52858 +{
52859 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52860 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
52861 + proc_is_chrooted(current))
52862 + return -EACCES;
52863 +#endif
52864 + return 0;
52865 +}
52866 +
52867 +void
52868 +gr_handle_chroot_chdir(struct path *path)
52869 +{
52870 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52871 + if (grsec_enable_chroot_chdir)
52872 + set_fs_pwd(current->fs, path);
52873 +#endif
52874 + return;
52875 +}
52876 +
52877 +int
52878 +gr_handle_chroot_chmod(const struct dentry *dentry,
52879 + const struct vfsmount *mnt, const int mode)
52880 +{
52881 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52882 + /* allow chmod +s on directories, but not files */
52883 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52884 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52885 + proc_is_chrooted(current)) {
52886 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52887 + return -EPERM;
52888 + }
52889 +#endif
52890 + return 0;
52891 +}
52892 diff -urNp linux-3.0.7/grsecurity/grsec_disabled.c linux-3.0.7/grsecurity/grsec_disabled.c
52893 --- linux-3.0.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52894 +++ linux-3.0.7/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
52895 @@ -0,0 +1,433 @@
52896 +#include <linux/kernel.h>
52897 +#include <linux/module.h>
52898 +#include <linux/sched.h>
52899 +#include <linux/file.h>
52900 +#include <linux/fs.h>
52901 +#include <linux/kdev_t.h>
52902 +#include <linux/net.h>
52903 +#include <linux/in.h>
52904 +#include <linux/ip.h>
52905 +#include <linux/skbuff.h>
52906 +#include <linux/sysctl.h>
52907 +
52908 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52909 +void
52910 +pax_set_initial_flags(struct linux_binprm *bprm)
52911 +{
52912 + return;
52913 +}
52914 +#endif
52915 +
52916 +#ifdef CONFIG_SYSCTL
52917 +__u32
52918 +gr_handle_sysctl(const struct ctl_table * table, const int op)
52919 +{
52920 + return 0;
52921 +}
52922 +#endif
52923 +
52924 +#ifdef CONFIG_TASKSTATS
52925 +int gr_is_taskstats_denied(int pid)
52926 +{
52927 + return 0;
52928 +}
52929 +#endif
52930 +
52931 +int
52932 +gr_acl_is_enabled(void)
52933 +{
52934 + return 0;
52935 +}
52936 +
52937 +int
52938 +gr_handle_rawio(const struct inode *inode)
52939 +{
52940 + return 0;
52941 +}
52942 +
52943 +void
52944 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52945 +{
52946 + return;
52947 +}
52948 +
52949 +int
52950 +gr_handle_ptrace(struct task_struct *task, const long request)
52951 +{
52952 + return 0;
52953 +}
52954 +
52955 +int
52956 +gr_handle_proc_ptrace(struct task_struct *task)
52957 +{
52958 + return 0;
52959 +}
52960 +
52961 +void
52962 +gr_learn_resource(const struct task_struct *task,
52963 + const int res, const unsigned long wanted, const int gt)
52964 +{
52965 + return;
52966 +}
52967 +
52968 +int
52969 +gr_set_acls(const int type)
52970 +{
52971 + return 0;
52972 +}
52973 +
52974 +int
52975 +gr_check_hidden_task(const struct task_struct *tsk)
52976 +{
52977 + return 0;
52978 +}
52979 +
52980 +int
52981 +gr_check_protected_task(const struct task_struct *task)
52982 +{
52983 + return 0;
52984 +}
52985 +
52986 +int
52987 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52988 +{
52989 + return 0;
52990 +}
52991 +
52992 +void
52993 +gr_copy_label(struct task_struct *tsk)
52994 +{
52995 + return;
52996 +}
52997 +
52998 +void
52999 +gr_set_pax_flags(struct task_struct *task)
53000 +{
53001 + return;
53002 +}
53003 +
53004 +int
53005 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53006 + const int unsafe_share)
53007 +{
53008 + return 0;
53009 +}
53010 +
53011 +void
53012 +gr_handle_delete(const ino_t ino, const dev_t dev)
53013 +{
53014 + return;
53015 +}
53016 +
53017 +void
53018 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53019 +{
53020 + return;
53021 +}
53022 +
53023 +void
53024 +gr_handle_crash(struct task_struct *task, const int sig)
53025 +{
53026 + return;
53027 +}
53028 +
53029 +int
53030 +gr_check_crash_exec(const struct file *filp)
53031 +{
53032 + return 0;
53033 +}
53034 +
53035 +int
53036 +gr_check_crash_uid(const uid_t uid)
53037 +{
53038 + return 0;
53039 +}
53040 +
53041 +void
53042 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53043 + struct dentry *old_dentry,
53044 + struct dentry *new_dentry,
53045 + struct vfsmount *mnt, const __u8 replace)
53046 +{
53047 + return;
53048 +}
53049 +
53050 +int
53051 +gr_search_socket(const int family, const int type, const int protocol)
53052 +{
53053 + return 1;
53054 +}
53055 +
53056 +int
53057 +gr_search_connectbind(const int mode, const struct socket *sock,
53058 + const struct sockaddr_in *addr)
53059 +{
53060 + return 0;
53061 +}
53062 +
53063 +void
53064 +gr_handle_alertkill(struct task_struct *task)
53065 +{
53066 + return;
53067 +}
53068 +
53069 +__u32
53070 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53071 +{
53072 + return 1;
53073 +}
53074 +
53075 +__u32
53076 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53077 + const struct vfsmount * mnt)
53078 +{
53079 + return 1;
53080 +}
53081 +
53082 +__u32
53083 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53084 + const int fmode)
53085 +{
53086 + return 1;
53087 +}
53088 +
53089 +__u32
53090 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53091 +{
53092 + return 1;
53093 +}
53094 +
53095 +__u32
53096 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53097 +{
53098 + return 1;
53099 +}
53100 +
53101 +int
53102 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53103 + unsigned int *vm_flags)
53104 +{
53105 + return 1;
53106 +}
53107 +
53108 +__u32
53109 +gr_acl_handle_truncate(const struct dentry * dentry,
53110 + const struct vfsmount * mnt)
53111 +{
53112 + return 1;
53113 +}
53114 +
53115 +__u32
53116 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53117 +{
53118 + return 1;
53119 +}
53120 +
53121 +__u32
53122 +gr_acl_handle_access(const struct dentry * dentry,
53123 + const struct vfsmount * mnt, const int fmode)
53124 +{
53125 + return 1;
53126 +}
53127 +
53128 +__u32
53129 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53130 + mode_t mode)
53131 +{
53132 + return 1;
53133 +}
53134 +
53135 +__u32
53136 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53137 + mode_t mode)
53138 +{
53139 + return 1;
53140 +}
53141 +
53142 +__u32
53143 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53144 +{
53145 + return 1;
53146 +}
53147 +
53148 +__u32
53149 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53150 +{
53151 + return 1;
53152 +}
53153 +
53154 +void
53155 +grsecurity_init(void)
53156 +{
53157 + return;
53158 +}
53159 +
53160 +__u32
53161 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53162 + const struct dentry * parent_dentry,
53163 + const struct vfsmount * parent_mnt,
53164 + const int mode)
53165 +{
53166 + return 1;
53167 +}
53168 +
53169 +__u32
53170 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53171 + const struct dentry * parent_dentry,
53172 + const struct vfsmount * parent_mnt)
53173 +{
53174 + return 1;
53175 +}
53176 +
53177 +__u32
53178 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53179 + const struct dentry * parent_dentry,
53180 + const struct vfsmount * parent_mnt, const char *from)
53181 +{
53182 + return 1;
53183 +}
53184 +
53185 +__u32
53186 +gr_acl_handle_link(const struct dentry * new_dentry,
53187 + const struct dentry * parent_dentry,
53188 + const struct vfsmount * parent_mnt,
53189 + const struct dentry * old_dentry,
53190 + const struct vfsmount * old_mnt, const char *to)
53191 +{
53192 + return 1;
53193 +}
53194 +
53195 +int
53196 +gr_acl_handle_rename(const struct dentry *new_dentry,
53197 + const struct dentry *parent_dentry,
53198 + const struct vfsmount *parent_mnt,
53199 + const struct dentry *old_dentry,
53200 + const struct inode *old_parent_inode,
53201 + const struct vfsmount *old_mnt, const char *newname)
53202 +{
53203 + return 0;
53204 +}
53205 +
53206 +int
53207 +gr_acl_handle_filldir(const struct file *file, const char *name,
53208 + const int namelen, const ino_t ino)
53209 +{
53210 + return 1;
53211 +}
53212 +
53213 +int
53214 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53215 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53216 +{
53217 + return 1;
53218 +}
53219 +
53220 +int
53221 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53222 +{
53223 + return 0;
53224 +}
53225 +
53226 +int
53227 +gr_search_accept(const struct socket *sock)
53228 +{
53229 + return 0;
53230 +}
53231 +
53232 +int
53233 +gr_search_listen(const struct socket *sock)
53234 +{
53235 + return 0;
53236 +}
53237 +
53238 +int
53239 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53240 +{
53241 + return 0;
53242 +}
53243 +
53244 +__u32
53245 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53246 +{
53247 + return 1;
53248 +}
53249 +
53250 +__u32
53251 +gr_acl_handle_creat(const struct dentry * dentry,
53252 + const struct dentry * p_dentry,
53253 + const struct vfsmount * p_mnt, const int fmode,
53254 + const int imode)
53255 +{
53256 + return 1;
53257 +}
53258 +
53259 +void
53260 +gr_acl_handle_exit(void)
53261 +{
53262 + return;
53263 +}
53264 +
53265 +int
53266 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53267 +{
53268 + return 1;
53269 +}
53270 +
53271 +void
53272 +gr_set_role_label(const uid_t uid, const gid_t gid)
53273 +{
53274 + return;
53275 +}
53276 +
53277 +int
53278 +gr_acl_handle_procpidmem(const struct task_struct *task)
53279 +{
53280 + return 0;
53281 +}
53282 +
53283 +int
53284 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53285 +{
53286 + return 0;
53287 +}
53288 +
53289 +int
53290 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53291 +{
53292 + return 0;
53293 +}
53294 +
53295 +void
53296 +gr_set_kernel_label(struct task_struct *task)
53297 +{
53298 + return;
53299 +}
53300 +
53301 +int
53302 +gr_check_user_change(int real, int effective, int fs)
53303 +{
53304 + return 0;
53305 +}
53306 +
53307 +int
53308 +gr_check_group_change(int real, int effective, int fs)
53309 +{
53310 + return 0;
53311 +}
53312 +
53313 +int gr_acl_enable_at_secure(void)
53314 +{
53315 + return 0;
53316 +}
53317 +
53318 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53319 +{
53320 + return dentry->d_inode->i_sb->s_dev;
53321 +}
53322 +
53323 +EXPORT_SYMBOL(gr_learn_resource);
53324 +EXPORT_SYMBOL(gr_set_kernel_label);
53325 +#ifdef CONFIG_SECURITY
53326 +EXPORT_SYMBOL(gr_check_user_change);
53327 +EXPORT_SYMBOL(gr_check_group_change);
53328 +#endif
53329 diff -urNp linux-3.0.7/grsecurity/grsec_exec.c linux-3.0.7/grsecurity/grsec_exec.c
53330 --- linux-3.0.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53331 +++ linux-3.0.7/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
53332 @@ -0,0 +1,145 @@
53333 +#include <linux/kernel.h>
53334 +#include <linux/sched.h>
53335 +#include <linux/file.h>
53336 +#include <linux/binfmts.h>
53337 +#include <linux/fs.h>
53338 +#include <linux/types.h>
53339 +#include <linux/grdefs.h>
53340 +#include <linux/grsecurity.h>
53341 +#include <linux/grinternal.h>
53342 +#include <linux/capability.h>
53343 +#include <linux/module.h>
53344 +
53345 +#include <asm/uaccess.h>
53346 +
53347 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53348 +static char gr_exec_arg_buf[132];
53349 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53350 +#endif
53351 +
53352 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53353 +
53354 +void
53355 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53356 +{
53357 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53358 + char *grarg = gr_exec_arg_buf;
53359 + unsigned int i, x, execlen = 0;
53360 + char c;
53361 +
53362 + if (!((grsec_enable_execlog && grsec_enable_group &&
53363 + in_group_p(grsec_audit_gid))
53364 + || (grsec_enable_execlog && !grsec_enable_group)))
53365 + return;
53366 +
53367 + mutex_lock(&gr_exec_arg_mutex);
53368 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53369 +
53370 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53371 + const char __user *p;
53372 + unsigned int len;
53373 +
53374 + p = get_user_arg_ptr(argv, i);
53375 + if (IS_ERR(p))
53376 + goto log;
53377 +
53378 + len = strnlen_user(p, 128 - execlen);
53379 + if (len > 128 - execlen)
53380 + len = 128 - execlen;
53381 + else if (len > 0)
53382 + len--;
53383 + if (copy_from_user(grarg + execlen, p, len))
53384 + goto log;
53385 +
53386 + /* rewrite unprintable characters */
53387 + for (x = 0; x < len; x++) {
53388 + c = *(grarg + execlen + x);
53389 + if (c < 32 || c > 126)
53390 + *(grarg + execlen + x) = ' ';
53391 + }
53392 +
53393 + execlen += len;
53394 + *(grarg + execlen) = ' ';
53395 + *(grarg + execlen + 1) = '\0';
53396 + execlen++;
53397 + }
53398 +
53399 + log:
53400 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53401 + bprm->file->f_path.mnt, grarg);
53402 + mutex_unlock(&gr_exec_arg_mutex);
53403 +#endif
53404 + return;
53405 +}
53406 +
53407 +#ifdef CONFIG_GRKERNSEC
53408 +extern int gr_acl_is_capable(const int cap);
53409 +extern int gr_acl_is_capable_nolog(const int cap);
53410 +extern int gr_chroot_is_capable(const int cap);
53411 +extern int gr_chroot_is_capable_nolog(const int cap);
53412 +#endif
53413 +
53414 +const char *captab_log[] = {
53415 + "CAP_CHOWN",
53416 + "CAP_DAC_OVERRIDE",
53417 + "CAP_DAC_READ_SEARCH",
53418 + "CAP_FOWNER",
53419 + "CAP_FSETID",
53420 + "CAP_KILL",
53421 + "CAP_SETGID",
53422 + "CAP_SETUID",
53423 + "CAP_SETPCAP",
53424 + "CAP_LINUX_IMMUTABLE",
53425 + "CAP_NET_BIND_SERVICE",
53426 + "CAP_NET_BROADCAST",
53427 + "CAP_NET_ADMIN",
53428 + "CAP_NET_RAW",
53429 + "CAP_IPC_LOCK",
53430 + "CAP_IPC_OWNER",
53431 + "CAP_SYS_MODULE",
53432 + "CAP_SYS_RAWIO",
53433 + "CAP_SYS_CHROOT",
53434 + "CAP_SYS_PTRACE",
53435 + "CAP_SYS_PACCT",
53436 + "CAP_SYS_ADMIN",
53437 + "CAP_SYS_BOOT",
53438 + "CAP_SYS_NICE",
53439 + "CAP_SYS_RESOURCE",
53440 + "CAP_SYS_TIME",
53441 + "CAP_SYS_TTY_CONFIG",
53442 + "CAP_MKNOD",
53443 + "CAP_LEASE",
53444 + "CAP_AUDIT_WRITE",
53445 + "CAP_AUDIT_CONTROL",
53446 + "CAP_SETFCAP",
53447 + "CAP_MAC_OVERRIDE",
53448 + "CAP_MAC_ADMIN",
53449 + "CAP_SYSLOG"
53450 +};
53451 +
53452 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
53453 +
53454 +int gr_is_capable(const int cap)
53455 +{
53456 +#ifdef CONFIG_GRKERNSEC
53457 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
53458 + return 1;
53459 + return 0;
53460 +#else
53461 + return 1;
53462 +#endif
53463 +}
53464 +
53465 +int gr_is_capable_nolog(const int cap)
53466 +{
53467 +#ifdef CONFIG_GRKERNSEC
53468 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
53469 + return 1;
53470 + return 0;
53471 +#else
53472 + return 1;
53473 +#endif
53474 +}
53475 +
53476 +EXPORT_SYMBOL(gr_is_capable);
53477 +EXPORT_SYMBOL(gr_is_capable_nolog);
53478 diff -urNp linux-3.0.7/grsecurity/grsec_fifo.c linux-3.0.7/grsecurity/grsec_fifo.c
53479 --- linux-3.0.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53480 +++ linux-3.0.7/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
53481 @@ -0,0 +1,24 @@
53482 +#include <linux/kernel.h>
53483 +#include <linux/sched.h>
53484 +#include <linux/fs.h>
53485 +#include <linux/file.h>
53486 +#include <linux/grinternal.h>
53487 +
53488 +int
53489 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53490 + const struct dentry *dir, const int flag, const int acc_mode)
53491 +{
53492 +#ifdef CONFIG_GRKERNSEC_FIFO
53493 + const struct cred *cred = current_cred();
53494 +
53495 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53496 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53497 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53498 + (cred->fsuid != dentry->d_inode->i_uid)) {
53499 + if (!inode_permission(dentry->d_inode, acc_mode))
53500 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53501 + return -EACCES;
53502 + }
53503 +#endif
53504 + return 0;
53505 +}
53506 diff -urNp linux-3.0.7/grsecurity/grsec_fork.c linux-3.0.7/grsecurity/grsec_fork.c
53507 --- linux-3.0.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53508 +++ linux-3.0.7/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
53509 @@ -0,0 +1,23 @@
53510 +#include <linux/kernel.h>
53511 +#include <linux/sched.h>
53512 +#include <linux/grsecurity.h>
53513 +#include <linux/grinternal.h>
53514 +#include <linux/errno.h>
53515 +
53516 +void
53517 +gr_log_forkfail(const int retval)
53518 +{
53519 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53520 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53521 + switch (retval) {
53522 + case -EAGAIN:
53523 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53524 + break;
53525 + case -ENOMEM:
53526 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53527 + break;
53528 + }
53529 + }
53530 +#endif
53531 + return;
53532 +}
53533 diff -urNp linux-3.0.7/grsecurity/grsec_init.c linux-3.0.7/grsecurity/grsec_init.c
53534 --- linux-3.0.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53535 +++ linux-3.0.7/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
53536 @@ -0,0 +1,269 @@
53537 +#include <linux/kernel.h>
53538 +#include <linux/sched.h>
53539 +#include <linux/mm.h>
53540 +#include <linux/gracl.h>
53541 +#include <linux/slab.h>
53542 +#include <linux/vmalloc.h>
53543 +#include <linux/percpu.h>
53544 +#include <linux/module.h>
53545 +
53546 +int grsec_enable_brute;
53547 +int grsec_enable_link;
53548 +int grsec_enable_dmesg;
53549 +int grsec_enable_harden_ptrace;
53550 +int grsec_enable_fifo;
53551 +int grsec_enable_execlog;
53552 +int grsec_enable_signal;
53553 +int grsec_enable_forkfail;
53554 +int grsec_enable_audit_ptrace;
53555 +int grsec_enable_time;
53556 +int grsec_enable_audit_textrel;
53557 +int grsec_enable_group;
53558 +int grsec_audit_gid;
53559 +int grsec_enable_chdir;
53560 +int grsec_enable_mount;
53561 +int grsec_enable_rofs;
53562 +int grsec_enable_chroot_findtask;
53563 +int grsec_enable_chroot_mount;
53564 +int grsec_enable_chroot_shmat;
53565 +int grsec_enable_chroot_fchdir;
53566 +int grsec_enable_chroot_double;
53567 +int grsec_enable_chroot_pivot;
53568 +int grsec_enable_chroot_chdir;
53569 +int grsec_enable_chroot_chmod;
53570 +int grsec_enable_chroot_mknod;
53571 +int grsec_enable_chroot_nice;
53572 +int grsec_enable_chroot_execlog;
53573 +int grsec_enable_chroot_caps;
53574 +int grsec_enable_chroot_sysctl;
53575 +int grsec_enable_chroot_unix;
53576 +int grsec_enable_tpe;
53577 +int grsec_tpe_gid;
53578 +int grsec_enable_blackhole;
53579 +#ifdef CONFIG_IPV6_MODULE
53580 +EXPORT_SYMBOL(grsec_enable_blackhole);
53581 +#endif
53582 +int grsec_lastack_retries;
53583 +int grsec_enable_tpe_all;
53584 +int grsec_enable_tpe_invert;
53585 +int grsec_enable_socket_all;
53586 +int grsec_socket_all_gid;
53587 +int grsec_enable_socket_client;
53588 +int grsec_socket_client_gid;
53589 +int grsec_enable_socket_server;
53590 +int grsec_socket_server_gid;
53591 +int grsec_resource_logging;
53592 +int grsec_disable_privio;
53593 +int grsec_enable_log_rwxmaps;
53594 +int grsec_lock;
53595 +
53596 +DEFINE_SPINLOCK(grsec_alert_lock);
53597 +unsigned long grsec_alert_wtime = 0;
53598 +unsigned long grsec_alert_fyet = 0;
53599 +
53600 +DEFINE_SPINLOCK(grsec_audit_lock);
53601 +
53602 +DEFINE_RWLOCK(grsec_exec_file_lock);
53603 +
53604 +char *gr_shared_page[4];
53605 +
53606 +char *gr_alert_log_fmt;
53607 +char *gr_audit_log_fmt;
53608 +char *gr_alert_log_buf;
53609 +char *gr_audit_log_buf;
53610 +
53611 +extern struct gr_arg *gr_usermode;
53612 +extern unsigned char *gr_system_salt;
53613 +extern unsigned char *gr_system_sum;
53614 +
53615 +void __init
53616 +grsecurity_init(void)
53617 +{
53618 + int j;
53619 + /* create the per-cpu shared pages */
53620 +
53621 +#ifdef CONFIG_X86
53622 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53623 +#endif
53624 +
53625 + for (j = 0; j < 4; j++) {
53626 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53627 + if (gr_shared_page[j] == NULL) {
53628 + panic("Unable to allocate grsecurity shared page");
53629 + return;
53630 + }
53631 + }
53632 +
53633 + /* allocate log buffers */
53634 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53635 + if (!gr_alert_log_fmt) {
53636 + panic("Unable to allocate grsecurity alert log format buffer");
53637 + return;
53638 + }
53639 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53640 + if (!gr_audit_log_fmt) {
53641 + panic("Unable to allocate grsecurity audit log format buffer");
53642 + return;
53643 + }
53644 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53645 + if (!gr_alert_log_buf) {
53646 + panic("Unable to allocate grsecurity alert log buffer");
53647 + return;
53648 + }
53649 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53650 + if (!gr_audit_log_buf) {
53651 + panic("Unable to allocate grsecurity audit log buffer");
53652 + return;
53653 + }
53654 +
53655 + /* allocate memory for authentication structure */
53656 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53657 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53658 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53659 +
53660 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53661 + panic("Unable to allocate grsecurity authentication structure");
53662 + return;
53663 + }
53664 +
53665 +
53666 +#ifdef CONFIG_GRKERNSEC_IO
53667 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53668 + grsec_disable_privio = 1;
53669 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53670 + grsec_disable_privio = 1;
53671 +#else
53672 + grsec_disable_privio = 0;
53673 +#endif
53674 +#endif
53675 +
53676 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53677 + /* for backward compatibility, tpe_invert always defaults to on if
53678 + enabled in the kernel
53679 + */
53680 + grsec_enable_tpe_invert = 1;
53681 +#endif
53682 +
53683 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53684 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53685 + grsec_lock = 1;
53686 +#endif
53687 +
53688 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53689 + grsec_enable_audit_textrel = 1;
53690 +#endif
53691 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53692 + grsec_enable_log_rwxmaps = 1;
53693 +#endif
53694 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53695 + grsec_enable_group = 1;
53696 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53697 +#endif
53698 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53699 + grsec_enable_chdir = 1;
53700 +#endif
53701 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53702 + grsec_enable_harden_ptrace = 1;
53703 +#endif
53704 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53705 + grsec_enable_mount = 1;
53706 +#endif
53707 +#ifdef CONFIG_GRKERNSEC_LINK
53708 + grsec_enable_link = 1;
53709 +#endif
53710 +#ifdef CONFIG_GRKERNSEC_BRUTE
53711 + grsec_enable_brute = 1;
53712 +#endif
53713 +#ifdef CONFIG_GRKERNSEC_DMESG
53714 + grsec_enable_dmesg = 1;
53715 +#endif
53716 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53717 + grsec_enable_blackhole = 1;
53718 + grsec_lastack_retries = 4;
53719 +#endif
53720 +#ifdef CONFIG_GRKERNSEC_FIFO
53721 + grsec_enable_fifo = 1;
53722 +#endif
53723 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53724 + grsec_enable_execlog = 1;
53725 +#endif
53726 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53727 + grsec_enable_signal = 1;
53728 +#endif
53729 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53730 + grsec_enable_forkfail = 1;
53731 +#endif
53732 +#ifdef CONFIG_GRKERNSEC_TIME
53733 + grsec_enable_time = 1;
53734 +#endif
53735 +#ifdef CONFIG_GRKERNSEC_RESLOG
53736 + grsec_resource_logging = 1;
53737 +#endif
53738 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53739 + grsec_enable_chroot_findtask = 1;
53740 +#endif
53741 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53742 + grsec_enable_chroot_unix = 1;
53743 +#endif
53744 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53745 + grsec_enable_chroot_mount = 1;
53746 +#endif
53747 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53748 + grsec_enable_chroot_fchdir = 1;
53749 +#endif
53750 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53751 + grsec_enable_chroot_shmat = 1;
53752 +#endif
53753 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53754 + grsec_enable_audit_ptrace = 1;
53755 +#endif
53756 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53757 + grsec_enable_chroot_double = 1;
53758 +#endif
53759 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53760 + grsec_enable_chroot_pivot = 1;
53761 +#endif
53762 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53763 + grsec_enable_chroot_chdir = 1;
53764 +#endif
53765 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53766 + grsec_enable_chroot_chmod = 1;
53767 +#endif
53768 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53769 + grsec_enable_chroot_mknod = 1;
53770 +#endif
53771 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53772 + grsec_enable_chroot_nice = 1;
53773 +#endif
53774 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53775 + grsec_enable_chroot_execlog = 1;
53776 +#endif
53777 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53778 + grsec_enable_chroot_caps = 1;
53779 +#endif
53780 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53781 + grsec_enable_chroot_sysctl = 1;
53782 +#endif
53783 +#ifdef CONFIG_GRKERNSEC_TPE
53784 + grsec_enable_tpe = 1;
53785 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53786 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53787 + grsec_enable_tpe_all = 1;
53788 +#endif
53789 +#endif
53790 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53791 + grsec_enable_socket_all = 1;
53792 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53793 +#endif
53794 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53795 + grsec_enable_socket_client = 1;
53796 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53797 +#endif
53798 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53799 + grsec_enable_socket_server = 1;
53800 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53801 +#endif
53802 +#endif
53803 +
53804 + return;
53805 +}
53806 diff -urNp linux-3.0.7/grsecurity/grsec_link.c linux-3.0.7/grsecurity/grsec_link.c
53807 --- linux-3.0.7/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53808 +++ linux-3.0.7/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
53809 @@ -0,0 +1,43 @@
53810 +#include <linux/kernel.h>
53811 +#include <linux/sched.h>
53812 +#include <linux/fs.h>
53813 +#include <linux/file.h>
53814 +#include <linux/grinternal.h>
53815 +
53816 +int
53817 +gr_handle_follow_link(const struct inode *parent,
53818 + const struct inode *inode,
53819 + const struct dentry *dentry, const struct vfsmount *mnt)
53820 +{
53821 +#ifdef CONFIG_GRKERNSEC_LINK
53822 + const struct cred *cred = current_cred();
53823 +
53824 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53825 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53826 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53827 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53828 + return -EACCES;
53829 + }
53830 +#endif
53831 + return 0;
53832 +}
53833 +
53834 +int
53835 +gr_handle_hardlink(const struct dentry *dentry,
53836 + const struct vfsmount *mnt,
53837 + struct inode *inode, const int mode, const char *to)
53838 +{
53839 +#ifdef CONFIG_GRKERNSEC_LINK
53840 + const struct cred *cred = current_cred();
53841 +
53842 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53843 + (!S_ISREG(mode) || (mode & S_ISUID) ||
53844 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53845 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53846 + !capable(CAP_FOWNER) && cred->uid) {
53847 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53848 + return -EPERM;
53849 + }
53850 +#endif
53851 + return 0;
53852 +}
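gr_handle_follow_link() above refuses to follow a symlink sitting in a world-writable sticky directory when the link is owned by neither the directory owner nor the current fsuid, and gr_handle_hardlink() blocks hardlinking to files the caller does not own unless the target is a regular, non-setuid, non-setgid-executable file the caller can read and write (or the caller has CAP_FOWNER). This closely resembles the policy mainline later exposed as fs.protected_symlinks/fs.protected_hardlinks. A compile-only sketch of the symlink rule, with stat(2) fields standing in for the inodes and the grsec_enable_link toggle omitted:

#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Illustrative: deny following 'link' found in 'parent' under the
 * sticky + world-writable + foreign-owner conditions used above. */
static bool symlink_follow_denied(const struct stat *parent,
                                  const struct stat *link, uid_t fsuid)
{
        return S_ISLNK(link->st_mode) &&
               (parent->st_mode & S_ISVTX) &&
               (parent->st_mode & S_IWOTH) &&
               parent->st_uid != link->st_uid &&
               fsuid != link->st_uid;
}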
53853 diff -urNp linux-3.0.7/grsecurity/grsec_log.c linux-3.0.7/grsecurity/grsec_log.c
53854 --- linux-3.0.7/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53855 +++ linux-3.0.7/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
53856 @@ -0,0 +1,315 @@
53857 +#include <linux/kernel.h>
53858 +#include <linux/sched.h>
53859 +#include <linux/file.h>
53860 +#include <linux/tty.h>
53861 +#include <linux/fs.h>
53862 +#include <linux/grinternal.h>
53863 +
53864 +#ifdef CONFIG_TREE_PREEMPT_RCU
53865 +#define DISABLE_PREEMPT() preempt_disable()
53866 +#define ENABLE_PREEMPT() preempt_enable()
53867 +#else
53868 +#define DISABLE_PREEMPT()
53869 +#define ENABLE_PREEMPT()
53870 +#endif
53871 +
53872 +#define BEGIN_LOCKS(x) \
53873 + DISABLE_PREEMPT(); \
53874 + rcu_read_lock(); \
53875 + read_lock(&tasklist_lock); \
53876 + read_lock(&grsec_exec_file_lock); \
53877 + if (x != GR_DO_AUDIT) \
53878 + spin_lock(&grsec_alert_lock); \
53879 + else \
53880 + spin_lock(&grsec_audit_lock)
53881 +
53882 +#define END_LOCKS(x) \
53883 + if (x != GR_DO_AUDIT) \
53884 + spin_unlock(&grsec_alert_lock); \
53885 + else \
53886 + spin_unlock(&grsec_audit_lock); \
53887 + read_unlock(&grsec_exec_file_lock); \
53888 + read_unlock(&tasklist_lock); \
53889 + rcu_read_unlock(); \
53890 + ENABLE_PREEMPT(); \
53891 + if (x == GR_DONT_AUDIT) \
53892 + gr_handle_alertkill(current)
53893 +
53894 +enum {
53895 + FLOODING,
53896 + NO_FLOODING
53897 +};
53898 +
53899 +extern char *gr_alert_log_fmt;
53900 +extern char *gr_audit_log_fmt;
53901 +extern char *gr_alert_log_buf;
53902 +extern char *gr_audit_log_buf;
53903 +
53904 +static int gr_log_start(int audit)
53905 +{
53906 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53907 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53908 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53909 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
53910 + unsigned long curr_secs = get_seconds();
53911 +
53912 + if (audit == GR_DO_AUDIT)
53913 + goto set_fmt;
53914 +
53915 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
53916 + grsec_alert_wtime = curr_secs;
53917 + grsec_alert_fyet = 0;
53918 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
53919 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53920 + grsec_alert_fyet++;
53921 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53922 + grsec_alert_wtime = curr_secs;
53923 + grsec_alert_fyet++;
53924 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53925 + return FLOODING;
53926 + }
53927 + else return FLOODING;
53928 +
53929 +set_fmt:
53930 +#endif
53931 + memset(buf, 0, PAGE_SIZE);
53932 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
53933 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53934 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53935 + } else if (current->signal->curr_ip) {
53936 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53937 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53938 + } else if (gr_acl_is_enabled()) {
53939 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53940 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53941 + } else {
53942 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
53943 + strcpy(buf, fmt);
53944 + }
53945 +
53946 + return NO_FLOODING;
53947 +}
53948 +
53949 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53950 + __attribute__ ((format (printf, 2, 0)));
53951 +
53952 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53953 +{
53954 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53955 + unsigned int len = strlen(buf);
53956 +
53957 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53958 +
53959 + return;
53960 +}
53961 +
53962 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53963 + __attribute__ ((format (printf, 2, 3)));
53964 +
53965 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53966 +{
53967 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53968 + unsigned int len = strlen(buf);
53969 + va_list ap;
53970 +
53971 + va_start(ap, msg);
53972 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53973 + va_end(ap);
53974 +
53975 + return;
53976 +}
53977 +
53978 +static void gr_log_end(int audit)
53979 +{
53980 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53981 + unsigned int len = strlen(buf);
53982 +
53983 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53984 + printk("%s\n", buf);
53985 +
53986 + return;
53987 +}
53988 +
53989 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53990 +{
53991 + int logtype;
53992 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53993 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53994 + void *voidptr = NULL;
53995 + int num1 = 0, num2 = 0;
53996 + unsigned long ulong1 = 0, ulong2 = 0;
53997 + struct dentry *dentry = NULL;
53998 + struct vfsmount *mnt = NULL;
53999 + struct file *file = NULL;
54000 + struct task_struct *task = NULL;
54001 + const struct cred *cred, *pcred;
54002 + va_list ap;
54003 +
54004 + BEGIN_LOCKS(audit);
54005 + logtype = gr_log_start(audit);
54006 + if (logtype == FLOODING) {
54007 + END_LOCKS(audit);
54008 + return;
54009 + }
54010 + va_start(ap, argtypes);
54011 + switch (argtypes) {
54012 + case GR_TTYSNIFF:
54013 + task = va_arg(ap, struct task_struct *);
54014 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54015 + break;
54016 + case GR_SYSCTL_HIDDEN:
54017 + str1 = va_arg(ap, char *);
54018 + gr_log_middle_varargs(audit, msg, result, str1);
54019 + break;
54020 + case GR_RBAC:
54021 + dentry = va_arg(ap, struct dentry *);
54022 + mnt = va_arg(ap, struct vfsmount *);
54023 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54024 + break;
54025 + case GR_RBAC_STR:
54026 + dentry = va_arg(ap, struct dentry *);
54027 + mnt = va_arg(ap, struct vfsmount *);
54028 + str1 = va_arg(ap, char *);
54029 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54030 + break;
54031 + case GR_STR_RBAC:
54032 + str1 = va_arg(ap, char *);
54033 + dentry = va_arg(ap, struct dentry *);
54034 + mnt = va_arg(ap, struct vfsmount *);
54035 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54036 + break;
54037 + case GR_RBAC_MODE2:
54038 + dentry = va_arg(ap, struct dentry *);
54039 + mnt = va_arg(ap, struct vfsmount *);
54040 + str1 = va_arg(ap, char *);
54041 + str2 = va_arg(ap, char *);
54042 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54043 + break;
54044 + case GR_RBAC_MODE3:
54045 + dentry = va_arg(ap, struct dentry *);
54046 + mnt = va_arg(ap, struct vfsmount *);
54047 + str1 = va_arg(ap, char *);
54048 + str2 = va_arg(ap, char *);
54049 + str3 = va_arg(ap, char *);
54050 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54051 + break;
54052 + case GR_FILENAME:
54053 + dentry = va_arg(ap, struct dentry *);
54054 + mnt = va_arg(ap, struct vfsmount *);
54055 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54056 + break;
54057 + case GR_STR_FILENAME:
54058 + str1 = va_arg(ap, char *);
54059 + dentry = va_arg(ap, struct dentry *);
54060 + mnt = va_arg(ap, struct vfsmount *);
54061 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54062 + break;
54063 + case GR_FILENAME_STR:
54064 + dentry = va_arg(ap, struct dentry *);
54065 + mnt = va_arg(ap, struct vfsmount *);
54066 + str1 = va_arg(ap, char *);
54067 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54068 + break;
54069 + case GR_FILENAME_TWO_INT:
54070 + dentry = va_arg(ap, struct dentry *);
54071 + mnt = va_arg(ap, struct vfsmount *);
54072 + num1 = va_arg(ap, int);
54073 + num2 = va_arg(ap, int);
54074 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54075 + break;
54076 + case GR_FILENAME_TWO_INT_STR:
54077 + dentry = va_arg(ap, struct dentry *);
54078 + mnt = va_arg(ap, struct vfsmount *);
54079 + num1 = va_arg(ap, int);
54080 + num2 = va_arg(ap, int);
54081 + str1 = va_arg(ap, char *);
54082 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54083 + break;
54084 + case GR_TEXTREL:
54085 + file = va_arg(ap, struct file *);
54086 + ulong1 = va_arg(ap, unsigned long);
54087 + ulong2 = va_arg(ap, unsigned long);
54088 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54089 + break;
54090 + case GR_PTRACE:
54091 + task = va_arg(ap, struct task_struct *);
54092 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54093 + break;
54094 + case GR_RESOURCE:
54095 + task = va_arg(ap, struct task_struct *);
54096 + cred = __task_cred(task);
54097 + pcred = __task_cred(task->real_parent);
54098 + ulong1 = va_arg(ap, unsigned long);
54099 + str1 = va_arg(ap, char *);
54100 + ulong2 = va_arg(ap, unsigned long);
54101 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54102 + break;
54103 + case GR_CAP:
54104 + task = va_arg(ap, struct task_struct *);
54105 + cred = __task_cred(task);
54106 + pcred = __task_cred(task->real_parent);
54107 + str1 = va_arg(ap, char *);
54108 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54109 + break;
54110 + case GR_SIG:
54111 + str1 = va_arg(ap, char *);
54112 + voidptr = va_arg(ap, void *);
54113 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54114 + break;
54115 + case GR_SIG2:
54116 + task = va_arg(ap, struct task_struct *);
54117 + cred = __task_cred(task);
54118 + pcred = __task_cred(task->real_parent);
54119 + num1 = va_arg(ap, int);
54120 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54121 + break;
54122 + case GR_CRASH1:
54123 + task = va_arg(ap, struct task_struct *);
54124 + cred = __task_cred(task);
54125 + pcred = __task_cred(task->real_parent);
54126 + ulong1 = va_arg(ap, unsigned long);
54127 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54128 + break;
54129 + case GR_CRASH2:
54130 + task = va_arg(ap, struct task_struct *);
54131 + cred = __task_cred(task);
54132 + pcred = __task_cred(task->real_parent);
54133 + ulong1 = va_arg(ap, unsigned long);
54134 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54135 + break;
54136 + case GR_RWXMAP:
54137 + file = va_arg(ap, struct file *);
54138 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54139 + break;
54140 + case GR_PSACCT:
54141 + {
54142 + unsigned int wday, cday;
54143 + __u8 whr, chr;
54144 + __u8 wmin, cmin;
54145 + __u8 wsec, csec;
54146 + char cur_tty[64] = { 0 };
54147 + char parent_tty[64] = { 0 };
54148 +
54149 + task = va_arg(ap, struct task_struct *);
54150 + wday = va_arg(ap, unsigned int);
54151 + cday = va_arg(ap, unsigned int);
54152 + whr = va_arg(ap, int);
54153 + chr = va_arg(ap, int);
54154 + wmin = va_arg(ap, int);
54155 + cmin = va_arg(ap, int);
54156 + wsec = va_arg(ap, int);
54157 + csec = va_arg(ap, int);
54158 + ulong1 = va_arg(ap, unsigned long);
54159 + cred = __task_cred(task);
54160 + pcred = __task_cred(task->real_parent);
54161 +
54162 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54163 + }
54164 + break;
54165 + default:
54166 + gr_log_middle(audit, msg, ap);
54167 + }
54168 + va_end(ap);
54169 + gr_log_end(audit);
54170 + END_LOCKS(audit);
54171 +}
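The interesting piece of grsec_log.c above is the flood control in gr_log_start(): alert messages (audit messages bypass it) are budgeted per CONFIG_GRKERNSEC_FLOODTIME-second window, and once roughly CONFIG_GRKERNSEC_FLOODBURST of them have been emitted in a window, a single "logging disabled" notice is printed and everything else is dropped until a full window has elapsed since the cut-off. A standalone sketch of that limiter, with fixed stand-in values for the two Kconfig knobs:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* stand-in for CONFIG_GRKERNSEC_FLOODTIME (seconds) */
#define FLOODBURST  6   /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t window_start;     /* plays the role of grsec_alert_wtime */
static unsigned int used;       /* plays the role of grsec_alert_fyet  */

/* Returns true when an alert may be formatted, false when it is flooded. */
static bool alert_allowed(void)
{
        time_t now = time(NULL);

        if (window_start == 0 || now > window_start + FLOODTIME) {
                window_start = now;     /* new window: reset the budget */
                used = 0;
                return true;
        }
        if (used < FLOODBURST) {
                used++;                 /* still inside the burst budget */
                return true;
        }
        if (used == FLOODBURST) {
                window_start = now;     /* restart the window at the cut-off */
                used++;
                fprintf(stderr, "more alerts, logging disabled for %d seconds\n",
                        FLOODTIME);
        }
        return false;                   /* flooded: drop this alert */
}

As in the hunk above, the limiter is only compiled in when both knobs are non-zero, and GR_DO_AUDIT messages skip it entirely.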
54172 diff -urNp linux-3.0.7/grsecurity/grsec_mem.c linux-3.0.7/grsecurity/grsec_mem.c
54173 --- linux-3.0.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54174 +++ linux-3.0.7/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
54175 @@ -0,0 +1,33 @@
54176 +#include <linux/kernel.h>
54177 +#include <linux/sched.h>
54178 +#include <linux/mm.h>
54179 +#include <linux/mman.h>
54180 +#include <linux/grinternal.h>
54181 +
54182 +void
54183 +gr_handle_ioperm(void)
54184 +{
54185 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54186 + return;
54187 +}
54188 +
54189 +void
54190 +gr_handle_iopl(void)
54191 +{
54192 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54193 + return;
54194 +}
54195 +
54196 +void
54197 +gr_handle_mem_readwrite(u64 from, u64 to)
54198 +{
54199 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54200 + return;
54201 +}
54202 +
54203 +void
54204 +gr_handle_vm86(void)
54205 +{
54206 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54207 + return;
54208 +}
54209 diff -urNp linux-3.0.7/grsecurity/grsec_mount.c linux-3.0.7/grsecurity/grsec_mount.c
54210 --- linux-3.0.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54211 +++ linux-3.0.7/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
54212 @@ -0,0 +1,62 @@
54213 +#include <linux/kernel.h>
54214 +#include <linux/sched.h>
54215 +#include <linux/mount.h>
54216 +#include <linux/grsecurity.h>
54217 +#include <linux/grinternal.h>
54218 +
54219 +void
54220 +gr_log_remount(const char *devname, const int retval)
54221 +{
54222 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54223 + if (grsec_enable_mount && (retval >= 0))
54224 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54225 +#endif
54226 + return;
54227 +}
54228 +
54229 +void
54230 +gr_log_unmount(const char *devname, const int retval)
54231 +{
54232 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54233 + if (grsec_enable_mount && (retval >= 0))
54234 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54235 +#endif
54236 + return;
54237 +}
54238 +
54239 +void
54240 +gr_log_mount(const char *from, const char *to, const int retval)
54241 +{
54242 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54243 + if (grsec_enable_mount && (retval >= 0))
54244 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54245 +#endif
54246 + return;
54247 +}
54248 +
54249 +int
54250 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54251 +{
54252 +#ifdef CONFIG_GRKERNSEC_ROFS
54253 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54254 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54255 + return -EPERM;
54256 + } else
54257 + return 0;
54258 +#endif
54259 + return 0;
54260 +}
54261 +
54262 +int
54263 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54264 +{
54265 +#ifdef CONFIG_GRKERNSEC_ROFS
54266 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54267 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54268 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54269 + return -EPERM;
54270 + } else
54271 + return 0;
54272 +#endif
54273 + return 0;
54274 +}
54275 diff -urNp linux-3.0.7/grsecurity/grsec_pax.c linux-3.0.7/grsecurity/grsec_pax.c
54276 --- linux-3.0.7/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54277 +++ linux-3.0.7/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
54278 @@ -0,0 +1,36 @@
54279 +#include <linux/kernel.h>
54280 +#include <linux/sched.h>
54281 +#include <linux/mm.h>
54282 +#include <linux/file.h>
54283 +#include <linux/grinternal.h>
54284 +#include <linux/grsecurity.h>
54285 +
54286 +void
54287 +gr_log_textrel(struct vm_area_struct * vma)
54288 +{
54289 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54290 + if (grsec_enable_audit_textrel)
54291 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54292 +#endif
54293 + return;
54294 +}
54295 +
54296 +void
54297 +gr_log_rwxmmap(struct file *file)
54298 +{
54299 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54300 + if (grsec_enable_log_rwxmaps)
54301 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54302 +#endif
54303 + return;
54304 +}
54305 +
54306 +void
54307 +gr_log_rwxmprotect(struct file *file)
54308 +{
54309 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54310 + if (grsec_enable_log_rwxmaps)
54311 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54312 +#endif
54313 + return;
54314 +}
54315 diff -urNp linux-3.0.7/grsecurity/grsec_ptrace.c linux-3.0.7/grsecurity/grsec_ptrace.c
54316 --- linux-3.0.7/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54317 +++ linux-3.0.7/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
54318 @@ -0,0 +1,14 @@
54319 +#include <linux/kernel.h>
54320 +#include <linux/sched.h>
54321 +#include <linux/grinternal.h>
54322 +#include <linux/grsecurity.h>
54323 +
54324 +void
54325 +gr_audit_ptrace(struct task_struct *task)
54326 +{
54327 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54328 + if (grsec_enable_audit_ptrace)
54329 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54330 +#endif
54331 + return;
54332 +}
54333 diff -urNp linux-3.0.7/grsecurity/grsec_sig.c linux-3.0.7/grsecurity/grsec_sig.c
54334 --- linux-3.0.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54335 +++ linux-3.0.7/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
54336 @@ -0,0 +1,206 @@
54337 +#include <linux/kernel.h>
54338 +#include <linux/sched.h>
54339 +#include <linux/delay.h>
54340 +#include <linux/grsecurity.h>
54341 +#include <linux/grinternal.h>
54342 +#include <linux/hardirq.h>
54343 +
54344 +char *signames[] = {
54345 + [SIGSEGV] = "Segmentation fault",
54346 + [SIGILL] = "Illegal instruction",
54347 + [SIGABRT] = "Abort",
54348 + [SIGBUS] = "Invalid alignment/Bus error"
54349 +};
54350 +
54351 +void
54352 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54353 +{
54354 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54355 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54356 + (sig == SIGABRT) || (sig == SIGBUS))) {
54357 + if (t->pid == current->pid) {
54358 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54359 + } else {
54360 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54361 + }
54362 + }
54363 +#endif
54364 + return;
54365 +}
54366 +
54367 +int
54368 +gr_handle_signal(const struct task_struct *p, const int sig)
54369 +{
54370 +#ifdef CONFIG_GRKERNSEC
54371 + if (current->pid > 1 && gr_check_protected_task(p)) {
54372 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54373 + return -EPERM;
54374 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54375 + return -EPERM;
54376 + }
54377 +#endif
54378 + return 0;
54379 +}
54380 +
54381 +#ifdef CONFIG_GRKERNSEC
54382 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54383 +
54384 +int gr_fake_force_sig(int sig, struct task_struct *t)
54385 +{
54386 + unsigned long int flags;
54387 + int ret, blocked, ignored;
54388 + struct k_sigaction *action;
54389 +
54390 + spin_lock_irqsave(&t->sighand->siglock, flags);
54391 + action = &t->sighand->action[sig-1];
54392 + ignored = action->sa.sa_handler == SIG_IGN;
54393 + blocked = sigismember(&t->blocked, sig);
54394 + if (blocked || ignored) {
54395 + action->sa.sa_handler = SIG_DFL;
54396 + if (blocked) {
54397 + sigdelset(&t->blocked, sig);
54398 + recalc_sigpending_and_wake(t);
54399 + }
54400 + }
54401 + if (action->sa.sa_handler == SIG_DFL)
54402 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54403 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54404 +
54405 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54406 +
54407 + return ret;
54408 +}
54409 +#endif
54410 +
54411 +#ifdef CONFIG_GRKERNSEC_BRUTE
54412 +#define GR_USER_BAN_TIME (15 * 60)
54413 +
54414 +static int __get_dumpable(unsigned long mm_flags)
54415 +{
54416 + int ret;
54417 +
54418 + ret = mm_flags & MMF_DUMPABLE_MASK;
54419 + return (ret >= 2) ? 2 : ret;
54420 +}
54421 +#endif
54422 +
54423 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54424 +{
54425 +#ifdef CONFIG_GRKERNSEC_BRUTE
54426 + uid_t uid = 0;
54427 +
54428 + if (!grsec_enable_brute)
54429 + return;
54430 +
54431 + rcu_read_lock();
54432 + read_lock(&tasklist_lock);
54433 + read_lock(&grsec_exec_file_lock);
54434 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54435 + p->real_parent->brute = 1;
54436 + else {
54437 + const struct cred *cred = __task_cred(p), *cred2;
54438 + struct task_struct *tsk, *tsk2;
54439 +
54440 + if (!__get_dumpable(mm_flags) && cred->uid) {
54441 + struct user_struct *user;
54442 +
54443 + uid = cred->uid;
54444 +
54445 + /* this is put upon execution past expiration */
54446 + user = find_user(uid);
54447 + if (user == NULL)
54448 + goto unlock;
54449 + user->banned = 1;
54450 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54451 + if (user->ban_expires == ~0UL)
54452 + user->ban_expires--;
54453 +
54454 + do_each_thread(tsk2, tsk) {
54455 + cred2 = __task_cred(tsk);
54456 + if (tsk != p && cred2->uid == uid)
54457 + gr_fake_force_sig(SIGKILL, tsk);
54458 + } while_each_thread(tsk2, tsk);
54459 + }
54460 + }
54461 +unlock:
54462 + read_unlock(&grsec_exec_file_lock);
54463 + read_unlock(&tasklist_lock);
54464 + rcu_read_unlock();
54465 +
54466 + if (uid)
54467 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54468 +
54469 +#endif
54470 + return;
54471 +}
54472 +
54473 +void gr_handle_brute_check(void)
54474 +{
54475 +#ifdef CONFIG_GRKERNSEC_BRUTE
54476 + if (current->brute)
54477 + msleep(30 * 1000);
54478 +#endif
54479 + return;
54480 +}
54481 +
54482 +void gr_handle_kernel_exploit(void)
54483 +{
54484 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54485 + const struct cred *cred;
54486 + struct task_struct *tsk, *tsk2;
54487 + struct user_struct *user;
54488 + uid_t uid;
54489 +
54490 + if (in_irq() || in_serving_softirq() || in_nmi())
54491 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54492 +
54493 + uid = current_uid();
54494 +
54495 + if (uid == 0)
54496 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54497 + else {
54498 + /* kill all the processes of this user, hold a reference
54499 + to their creds struct, and prevent them from creating
54500 + another process until system reset
54501 + */
54502 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54503 + /* we intentionally leak this ref */
54504 + user = get_uid(current->cred->user);
54505 + if (user) {
54506 + user->banned = 1;
54507 + user->ban_expires = ~0UL;
54508 + }
54509 +
54510 + read_lock(&tasklist_lock);
54511 + do_each_thread(tsk2, tsk) {
54512 + cred = __task_cred(tsk);
54513 + if (cred->uid == uid)
54514 + gr_fake_force_sig(SIGKILL, tsk);
54515 + } while_each_thread(tsk2, tsk);
54516 + read_unlock(&tasklist_lock);
54517 + }
54518 +#endif
54519 +}
54520 +
54521 +int __gr_process_user_ban(struct user_struct *user)
54522 +{
54523 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54524 + if (unlikely(user->banned)) {
54525 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54526 + user->banned = 0;
54527 + user->ban_expires = 0;
54528 + free_uid(user);
54529 + } else
54530 + return -EPERM;
54531 + }
54532 +#endif
54533 + return 0;
54534 +}
54535 +
54536 +int gr_process_user_ban(void)
54537 +{
54538 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54539 + return __gr_process_user_ban(current->cred->user);
54540 +#endif
54541 + return 0;
54542 +}
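grsec_sig.c above carries the bruteforce deterrence: when a non-dumpable (typically setuid) binary owned by a non-root user crashes, that uid is banned for GR_USER_BAN_TIME (15 minutes) and the user's other tasks are SIGKILLed; if the crashing task shares its executable with its parent, the parent is instead marked "brute" so that later forks stall 30 seconds in gr_handle_brute_check(). gr_handle_kernel_exploit() reuses the same ban with a permanent expiry when a kernel crash is attributed to a non-root user, and panics outright for root or interrupt context. A sketch of just the ban bookkeeping that __gr_process_user_ban() enforces (types and names are stand-ins, and the uid refcounting is left out):

#include <stdbool.h>
#include <time.h>

#define BAN_SECONDS (15 * 60)           /* mirrors GR_USER_BAN_TIME */

struct user_ban {
        bool banned;
        time_t expires;                 /* (time_t)-1 means "until reboot" */
};

static void ban_user(struct user_ban *u, bool permanent)
{
        u->banned = true;
        u->expires = permanent ? (time_t)-1 : time(NULL) + BAN_SECONDS;
}

/* 0: the user may create processes again; -1: still banned.  A finite ban
 * clears itself the first time it is checked after expiry. */
static int process_user_ban(struct user_ban *u)
{
        if (!u->banned)
                return 0;
        if (u->expires != (time_t)-1 && time(NULL) >= u->expires) {
                u->banned = false;
                u->expires = 0;
                return 0;
        }
        return -1;
}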
54543 diff -urNp linux-3.0.7/grsecurity/grsec_sock.c linux-3.0.7/grsecurity/grsec_sock.c
54544 --- linux-3.0.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54545 +++ linux-3.0.7/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
54546 @@ -0,0 +1,244 @@
54547 +#include <linux/kernel.h>
54548 +#include <linux/module.h>
54549 +#include <linux/sched.h>
54550 +#include <linux/file.h>
54551 +#include <linux/net.h>
54552 +#include <linux/in.h>
54553 +#include <linux/ip.h>
54554 +#include <net/sock.h>
54555 +#include <net/inet_sock.h>
54556 +#include <linux/grsecurity.h>
54557 +#include <linux/grinternal.h>
54558 +#include <linux/gracl.h>
54559 +
54560 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54561 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54562 +
54563 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54564 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54565 +
54566 +#ifdef CONFIG_UNIX_MODULE
54567 +EXPORT_SYMBOL(gr_acl_handle_unix);
54568 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54569 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54570 +EXPORT_SYMBOL(gr_handle_create);
54571 +#endif
54572 +
54573 +#ifdef CONFIG_GRKERNSEC
54574 +#define gr_conn_table_size 32749
54575 +struct conn_table_entry {
54576 + struct conn_table_entry *next;
54577 + struct signal_struct *sig;
54578 +};
54579 +
54580 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54581 +DEFINE_SPINLOCK(gr_conn_table_lock);
54582 +
54583 +extern const char * gr_socktype_to_name(unsigned char type);
54584 +extern const char * gr_proto_to_name(unsigned char proto);
54585 +extern const char * gr_sockfamily_to_name(unsigned char family);
54586 +
54587 +static __inline__ int
54588 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54589 +{
54590 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54591 +}
54592 +
54593 +static __inline__ int
54594 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54595 + __u16 sport, __u16 dport)
54596 +{
54597 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54598 + sig->gr_sport == sport && sig->gr_dport == dport))
54599 + return 1;
54600 + else
54601 + return 0;
54602 +}
54603 +
54604 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54605 +{
54606 + struct conn_table_entry **match;
54607 + unsigned int index;
54608 +
54609 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54610 + sig->gr_sport, sig->gr_dport,
54611 + gr_conn_table_size);
54612 +
54613 + newent->sig = sig;
54614 +
54615 + match = &gr_conn_table[index];
54616 + newent->next = *match;
54617 + *match = newent;
54618 +
54619 + return;
54620 +}
54621 +
54622 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54623 +{
54624 + struct conn_table_entry *match, *last = NULL;
54625 + unsigned int index;
54626 +
54627 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54628 + sig->gr_sport, sig->gr_dport,
54629 + gr_conn_table_size);
54630 +
54631 + match = gr_conn_table[index];
54632 + while (match && !conn_match(match->sig,
54633 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54634 + sig->gr_dport)) {
54635 + last = match;
54636 + match = match->next;
54637 + }
54638 +
54639 + if (match) {
54640 + if (last)
54641 + last->next = match->next;
54642 + else
54643 + gr_conn_table[index] = NULL;
54644 + kfree(match);
54645 + }
54646 +
54647 + return;
54648 +}
54649 +
54650 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54651 + __u16 sport, __u16 dport)
54652 +{
54653 + struct conn_table_entry *match;
54654 + unsigned int index;
54655 +
54656 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54657 +
54658 + match = gr_conn_table[index];
54659 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54660 + match = match->next;
54661 +
54662 + if (match)
54663 + return match->sig;
54664 + else
54665 + return NULL;
54666 +}
54667 +
54668 +#endif
54669 +
54670 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54671 +{
54672 +#ifdef CONFIG_GRKERNSEC
54673 + struct signal_struct *sig = task->signal;
54674 + struct conn_table_entry *newent;
54675 +
54676 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54677 + if (newent == NULL)
54678 + return;
54679 + /* no bh lock needed since we are called with bh disabled */
54680 + spin_lock(&gr_conn_table_lock);
54681 + gr_del_task_from_ip_table_nolock(sig);
54682 + sig->gr_saddr = inet->inet_rcv_saddr;
54683 + sig->gr_daddr = inet->inet_daddr;
54684 + sig->gr_sport = inet->inet_sport;
54685 + sig->gr_dport = inet->inet_dport;
54686 + gr_add_to_task_ip_table_nolock(sig, newent);
54687 + spin_unlock(&gr_conn_table_lock);
54688 +#endif
54689 + return;
54690 +}
54691 +
54692 +void gr_del_task_from_ip_table(struct task_struct *task)
54693 +{
54694 +#ifdef CONFIG_GRKERNSEC
54695 + spin_lock_bh(&gr_conn_table_lock);
54696 + gr_del_task_from_ip_table_nolock(task->signal);
54697 + spin_unlock_bh(&gr_conn_table_lock);
54698 +#endif
54699 + return;
54700 +}
54701 +
54702 +void
54703 +gr_attach_curr_ip(const struct sock *sk)
54704 +{
54705 +#ifdef CONFIG_GRKERNSEC
54706 + struct signal_struct *p, *set;
54707 + const struct inet_sock *inet = inet_sk(sk);
54708 +
54709 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54710 + return;
54711 +
54712 + set = current->signal;
54713 +
54714 + spin_lock_bh(&gr_conn_table_lock);
54715 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
54716 + inet->inet_dport, inet->inet_sport);
54717 + if (unlikely(p != NULL)) {
54718 + set->curr_ip = p->curr_ip;
54719 + set->used_accept = 1;
54720 + gr_del_task_from_ip_table_nolock(p);
54721 + spin_unlock_bh(&gr_conn_table_lock);
54722 + return;
54723 + }
54724 + spin_unlock_bh(&gr_conn_table_lock);
54725 +
54726 + set->curr_ip = inet->inet_daddr;
54727 + set->used_accept = 1;
54728 +#endif
54729 + return;
54730 +}
54731 +
54732 +int
54733 +gr_handle_sock_all(const int family, const int type, const int protocol)
54734 +{
54735 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54736 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54737 + (family != AF_UNIX)) {
54738 + if (family == AF_INET)
54739 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54740 + else
54741 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54742 + return -EACCES;
54743 + }
54744 +#endif
54745 + return 0;
54746 +}
54747 +
54748 +int
54749 +gr_handle_sock_server(const struct sockaddr *sck)
54750 +{
54751 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54752 + if (grsec_enable_socket_server &&
54753 + in_group_p(grsec_socket_server_gid) &&
54754 + sck && (sck->sa_family != AF_UNIX) &&
54755 + (sck->sa_family != AF_LOCAL)) {
54756 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54757 + return -EACCES;
54758 + }
54759 +#endif
54760 + return 0;
54761 +}
54762 +
54763 +int
54764 +gr_handle_sock_server_other(const struct sock *sck)
54765 +{
54766 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54767 + if (grsec_enable_socket_server &&
54768 + in_group_p(grsec_socket_server_gid) &&
54769 + sck && (sck->sk_family != AF_UNIX) &&
54770 + (sck->sk_family != AF_LOCAL)) {
54771 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54772 + return -EACCES;
54773 + }
54774 +#endif
54775 + return 0;
54776 +}
54777 +
54778 +int
54779 +gr_handle_sock_client(const struct sockaddr *sck)
54780 +{
54781 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54782 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54783 + sck && (sck->sa_family != AF_UNIX) &&
54784 + (sck->sa_family != AF_LOCAL)) {
54785 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54786 + return -EACCES;
54787 + }
54788 +#endif
54789 + return 0;
54790 +}
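grsec_sock.c above maintains a fixed-size, chained hash table keyed on the TCP 4-tuple so that gr_attach_curr_ip() can copy the peer address recorded by the connection's previous owner into the accepting task's signal_struct for logging. (Note that gr_del_task_from_ip_table_nolock() appears to clear the whole bucket when the head of a chain matches, rather than relinking to match->next, which would drop any entries chained behind the head.) A userspace sketch of the table using the same hash, plus a remove that relinks the chain; the names are illustrative and locking is omitted:

#include <stdint.h>
#include <stddef.h>

#define TABLE_SIZE 32749u       /* prime, matching gr_conn_table_size */

struct conn_entry {
        struct conn_entry *next;
        uint32_t saddr, daddr;
        uint16_t sport, dport;
        void *owner;            /* stand-in for struct signal_struct * */
};

static struct conn_entry *table[TABLE_SIZE];

/* Same mixing as conn_hash(): both addresses plus shifted ports, mod size. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
        return (saddr + daddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(struct conn_entry *e)
{
        unsigned int i = conn_hash(e->saddr, e->daddr, e->sport, e->dport);

        e->next = table[i];     /* push onto the head of the chain */
        table[i] = e;
}

static void *conn_lookup(uint32_t saddr, uint32_t daddr,
                         uint16_t sport, uint16_t dport)
{
        struct conn_entry *e = table[conn_hash(saddr, daddr, sport, dport)];

        for (; e; e = e->next)
                if (e->saddr == saddr && e->daddr == daddr &&
                    e->sport == sport && e->dport == dport)
                        return e->owner;
        return NULL;
}

static void conn_remove(struct conn_entry *e)
{
        struct conn_entry **pp = &table[conn_hash(e->saddr, e->daddr,
                                                  e->sport, e->dport)];

        for (; *pp; pp = &(*pp)->next)
                if (*pp == e) {
                        *pp = e->next;  /* relink, keeping the rest of the chain */
                        return;
                }
}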
54791 diff -urNp linux-3.0.7/grsecurity/grsec_sysctl.c linux-3.0.7/grsecurity/grsec_sysctl.c
54792 --- linux-3.0.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54793 +++ linux-3.0.7/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
54794 @@ -0,0 +1,433 @@
54795 +#include <linux/kernel.h>
54796 +#include <linux/sched.h>
54797 +#include <linux/sysctl.h>
54798 +#include <linux/grsecurity.h>
54799 +#include <linux/grinternal.h>
54800 +
54801 +int
54802 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54803 +{
54804 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54805 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54806 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54807 + return -EACCES;
54808 + }
54809 +#endif
54810 + return 0;
54811 +}
54812 +
54813 +#ifdef CONFIG_GRKERNSEC_ROFS
54814 +static int __maybe_unused one = 1;
54815 +#endif
54816 +
54817 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54818 +struct ctl_table grsecurity_table[] = {
54819 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54820 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54821 +#ifdef CONFIG_GRKERNSEC_IO
54822 + {
54823 + .procname = "disable_priv_io",
54824 + .data = &grsec_disable_privio,
54825 + .maxlen = sizeof(int),
54826 + .mode = 0600,
54827 + .proc_handler = &proc_dointvec,
54828 + },
54829 +#endif
54830 +#endif
54831 +#ifdef CONFIG_GRKERNSEC_LINK
54832 + {
54833 + .procname = "linking_restrictions",
54834 + .data = &grsec_enable_link,
54835 + .maxlen = sizeof(int),
54836 + .mode = 0600,
54837 + .proc_handler = &proc_dointvec,
54838 + },
54839 +#endif
54840 +#ifdef CONFIG_GRKERNSEC_BRUTE
54841 + {
54842 + .procname = "deter_bruteforce",
54843 + .data = &grsec_enable_brute,
54844 + .maxlen = sizeof(int),
54845 + .mode = 0600,
54846 + .proc_handler = &proc_dointvec,
54847 + },
54848 +#endif
54849 +#ifdef CONFIG_GRKERNSEC_FIFO
54850 + {
54851 + .procname = "fifo_restrictions",
54852 + .data = &grsec_enable_fifo,
54853 + .maxlen = sizeof(int),
54854 + .mode = 0600,
54855 + .proc_handler = &proc_dointvec,
54856 + },
54857 +#endif
54858 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54859 + {
54860 + .procname = "ip_blackhole",
54861 + .data = &grsec_enable_blackhole,
54862 + .maxlen = sizeof(int),
54863 + .mode = 0600,
54864 + .proc_handler = &proc_dointvec,
54865 + },
54866 + {
54867 + .procname = "lastack_retries",
54868 + .data = &grsec_lastack_retries,
54869 + .maxlen = sizeof(int),
54870 + .mode = 0600,
54871 + .proc_handler = &proc_dointvec,
54872 + },
54873 +#endif
54874 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54875 + {
54876 + .procname = "exec_logging",
54877 + .data = &grsec_enable_execlog,
54878 + .maxlen = sizeof(int),
54879 + .mode = 0600,
54880 + .proc_handler = &proc_dointvec,
54881 + },
54882 +#endif
54883 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54884 + {
54885 + .procname = "rwxmap_logging",
54886 + .data = &grsec_enable_log_rwxmaps,
54887 + .maxlen = sizeof(int),
54888 + .mode = 0600,
54889 + .proc_handler = &proc_dointvec,
54890 + },
54891 +#endif
54892 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54893 + {
54894 + .procname = "signal_logging",
54895 + .data = &grsec_enable_signal,
54896 + .maxlen = sizeof(int),
54897 + .mode = 0600,
54898 + .proc_handler = &proc_dointvec,
54899 + },
54900 +#endif
54901 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54902 + {
54903 + .procname = "forkfail_logging",
54904 + .data = &grsec_enable_forkfail,
54905 + .maxlen = sizeof(int),
54906 + .mode = 0600,
54907 + .proc_handler = &proc_dointvec,
54908 + },
54909 +#endif
54910 +#ifdef CONFIG_GRKERNSEC_TIME
54911 + {
54912 + .procname = "timechange_logging",
54913 + .data = &grsec_enable_time,
54914 + .maxlen = sizeof(int),
54915 + .mode = 0600,
54916 + .proc_handler = &proc_dointvec,
54917 + },
54918 +#endif
54919 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54920 + {
54921 + .procname = "chroot_deny_shmat",
54922 + .data = &grsec_enable_chroot_shmat,
54923 + .maxlen = sizeof(int),
54924 + .mode = 0600,
54925 + .proc_handler = &proc_dointvec,
54926 + },
54927 +#endif
54928 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54929 + {
54930 + .procname = "chroot_deny_unix",
54931 + .data = &grsec_enable_chroot_unix,
54932 + .maxlen = sizeof(int),
54933 + .mode = 0600,
54934 + .proc_handler = &proc_dointvec,
54935 + },
54936 +#endif
54937 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54938 + {
54939 + .procname = "chroot_deny_mount",
54940 + .data = &grsec_enable_chroot_mount,
54941 + .maxlen = sizeof(int),
54942 + .mode = 0600,
54943 + .proc_handler = &proc_dointvec,
54944 + },
54945 +#endif
54946 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54947 + {
54948 + .procname = "chroot_deny_fchdir",
54949 + .data = &grsec_enable_chroot_fchdir,
54950 + .maxlen = sizeof(int),
54951 + .mode = 0600,
54952 + .proc_handler = &proc_dointvec,
54953 + },
54954 +#endif
54955 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54956 + {
54957 + .procname = "chroot_deny_chroot",
54958 + .data = &grsec_enable_chroot_double,
54959 + .maxlen = sizeof(int),
54960 + .mode = 0600,
54961 + .proc_handler = &proc_dointvec,
54962 + },
54963 +#endif
54964 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54965 + {
54966 + .procname = "chroot_deny_pivot",
54967 + .data = &grsec_enable_chroot_pivot,
54968 + .maxlen = sizeof(int),
54969 + .mode = 0600,
54970 + .proc_handler = &proc_dointvec,
54971 + },
54972 +#endif
54973 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54974 + {
54975 + .procname = "chroot_enforce_chdir",
54976 + .data = &grsec_enable_chroot_chdir,
54977 + .maxlen = sizeof(int),
54978 + .mode = 0600,
54979 + .proc_handler = &proc_dointvec,
54980 + },
54981 +#endif
54982 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54983 + {
54984 + .procname = "chroot_deny_chmod",
54985 + .data = &grsec_enable_chroot_chmod,
54986 + .maxlen = sizeof(int),
54987 + .mode = 0600,
54988 + .proc_handler = &proc_dointvec,
54989 + },
54990 +#endif
54991 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54992 + {
54993 + .procname = "chroot_deny_mknod",
54994 + .data = &grsec_enable_chroot_mknod,
54995 + .maxlen = sizeof(int),
54996 + .mode = 0600,
54997 + .proc_handler = &proc_dointvec,
54998 + },
54999 +#endif
55000 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55001 + {
55002 + .procname = "chroot_restrict_nice",
55003 + .data = &grsec_enable_chroot_nice,
55004 + .maxlen = sizeof(int),
55005 + .mode = 0600,
55006 + .proc_handler = &proc_dointvec,
55007 + },
55008 +#endif
55009 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55010 + {
55011 + .procname = "chroot_execlog",
55012 + .data = &grsec_enable_chroot_execlog,
55013 + .maxlen = sizeof(int),
55014 + .mode = 0600,
55015 + .proc_handler = &proc_dointvec,
55016 + },
55017 +#endif
55018 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55019 + {
55020 + .procname = "chroot_caps",
55021 + .data = &grsec_enable_chroot_caps,
55022 + .maxlen = sizeof(int),
55023 + .mode = 0600,
55024 + .proc_handler = &proc_dointvec,
55025 + },
55026 +#endif
55027 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55028 + {
55029 + .procname = "chroot_deny_sysctl",
55030 + .data = &grsec_enable_chroot_sysctl,
55031 + .maxlen = sizeof(int),
55032 + .mode = 0600,
55033 + .proc_handler = &proc_dointvec,
55034 + },
55035 +#endif
55036 +#ifdef CONFIG_GRKERNSEC_TPE
55037 + {
55038 + .procname = "tpe",
55039 + .data = &grsec_enable_tpe,
55040 + .maxlen = sizeof(int),
55041 + .mode = 0600,
55042 + .proc_handler = &proc_dointvec,
55043 + },
55044 + {
55045 + .procname = "tpe_gid",
55046 + .data = &grsec_tpe_gid,
55047 + .maxlen = sizeof(int),
55048 + .mode = 0600,
55049 + .proc_handler = &proc_dointvec,
55050 + },
55051 +#endif
55052 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55053 + {
55054 + .procname = "tpe_invert",
55055 + .data = &grsec_enable_tpe_invert,
55056 + .maxlen = sizeof(int),
55057 + .mode = 0600,
55058 + .proc_handler = &proc_dointvec,
55059 + },
55060 +#endif
55061 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55062 + {
55063 + .procname = "tpe_restrict_all",
55064 + .data = &grsec_enable_tpe_all,
55065 + .maxlen = sizeof(int),
55066 + .mode = 0600,
55067 + .proc_handler = &proc_dointvec,
55068 + },
55069 +#endif
55070 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55071 + {
55072 + .procname = "socket_all",
55073 + .data = &grsec_enable_socket_all,
55074 + .maxlen = sizeof(int),
55075 + .mode = 0600,
55076 + .proc_handler = &proc_dointvec,
55077 + },
55078 + {
55079 + .procname = "socket_all_gid",
55080 + .data = &grsec_socket_all_gid,
55081 + .maxlen = sizeof(int),
55082 + .mode = 0600,
55083 + .proc_handler = &proc_dointvec,
55084 + },
55085 +#endif
55086 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55087 + {
55088 + .procname = "socket_client",
55089 + .data = &grsec_enable_socket_client,
55090 + .maxlen = sizeof(int),
55091 + .mode = 0600,
55092 + .proc_handler = &proc_dointvec,
55093 + },
55094 + {
55095 + .procname = "socket_client_gid",
55096 + .data = &grsec_socket_client_gid,
55097 + .maxlen = sizeof(int),
55098 + .mode = 0600,
55099 + .proc_handler = &proc_dointvec,
55100 + },
55101 +#endif
55102 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55103 + {
55104 + .procname = "socket_server",
55105 + .data = &grsec_enable_socket_server,
55106 + .maxlen = sizeof(int),
55107 + .mode = 0600,
55108 + .proc_handler = &proc_dointvec,
55109 + },
55110 + {
55111 + .procname = "socket_server_gid",
55112 + .data = &grsec_socket_server_gid,
55113 + .maxlen = sizeof(int),
55114 + .mode = 0600,
55115 + .proc_handler = &proc_dointvec,
55116 + },
55117 +#endif
55118 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55119 + {
55120 + .procname = "audit_group",
55121 + .data = &grsec_enable_group,
55122 + .maxlen = sizeof(int),
55123 + .mode = 0600,
55124 + .proc_handler = &proc_dointvec,
55125 + },
55126 + {
55127 + .procname = "audit_gid",
55128 + .data = &grsec_audit_gid,
55129 + .maxlen = sizeof(int),
55130 + .mode = 0600,
55131 + .proc_handler = &proc_dointvec,
55132 + },
55133 +#endif
55134 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55135 + {
55136 + .procname = "audit_chdir",
55137 + .data = &grsec_enable_chdir,
55138 + .maxlen = sizeof(int),
55139 + .mode = 0600,
55140 + .proc_handler = &proc_dointvec,
55141 + },
55142 +#endif
55143 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55144 + {
55145 + .procname = "audit_mount",
55146 + .data = &grsec_enable_mount,
55147 + .maxlen = sizeof(int),
55148 + .mode = 0600,
55149 + .proc_handler = &proc_dointvec,
55150 + },
55151 +#endif
55152 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55153 + {
55154 + .procname = "audit_textrel",
55155 + .data = &grsec_enable_audit_textrel,
55156 + .maxlen = sizeof(int),
55157 + .mode = 0600,
55158 + .proc_handler = &proc_dointvec,
55159 + },
55160 +#endif
55161 +#ifdef CONFIG_GRKERNSEC_DMESG
55162 + {
55163 + .procname = "dmesg",
55164 + .data = &grsec_enable_dmesg,
55165 + .maxlen = sizeof(int),
55166 + .mode = 0600,
55167 + .proc_handler = &proc_dointvec,
55168 + },
55169 +#endif
55170 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55171 + {
55172 + .procname = "chroot_findtask",
55173 + .data = &grsec_enable_chroot_findtask,
55174 + .maxlen = sizeof(int),
55175 + .mode = 0600,
55176 + .proc_handler = &proc_dointvec,
55177 + },
55178 +#endif
55179 +#ifdef CONFIG_GRKERNSEC_RESLOG
55180 + {
55181 + .procname = "resource_logging",
55182 + .data = &grsec_resource_logging,
55183 + .maxlen = sizeof(int),
55184 + .mode = 0600,
55185 + .proc_handler = &proc_dointvec,
55186 + },
55187 +#endif
55188 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55189 + {
55190 + .procname = "audit_ptrace",
55191 + .data = &grsec_enable_audit_ptrace,
55192 + .maxlen = sizeof(int),
55193 + .mode = 0600,
55194 + .proc_handler = &proc_dointvec,
55195 + },
55196 +#endif
55197 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55198 + {
55199 + .procname = "harden_ptrace",
55200 + .data = &grsec_enable_harden_ptrace,
55201 + .maxlen = sizeof(int),
55202 + .mode = 0600,
55203 + .proc_handler = &proc_dointvec,
55204 + },
55205 +#endif
55206 + {
55207 + .procname = "grsec_lock",
55208 + .data = &grsec_lock,
55209 + .maxlen = sizeof(int),
55210 + .mode = 0600,
55211 + .proc_handler = &proc_dointvec,
55212 + },
55213 +#endif
55214 +#ifdef CONFIG_GRKERNSEC_ROFS
55215 + {
55216 + .procname = "romount_protect",
55217 + .data = &grsec_enable_rofs,
55218 + .maxlen = sizeof(int),
55219 + .mode = 0600,
55220 + .proc_handler = &proc_dointvec_minmax,
55221 + .extra1 = &one,
55222 + .extra2 = &one,
55223 + },
55224 +#endif
55225 + { }
55226 +};
55227 +#endif
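The toggles initialised in grsec_init.c are exposed above as 0600 integer entries under the grsecurity sysctl directory, and gr_handle_sysctl_mod() at the top of the file turns grsec_lock into a one-way latch: once it is set, any further write beneath grsecurity/ is refused. A tiny sketch of that latch, with -1 standing in for -EACCES:

#include <stdbool.h>
#include <string.h>

static int grsec_lock;          /* the lock sysctl's backing variable */

/* Illustrative: once grsec_lock is non-zero, writes under "grsecurity" fail. */
static int sysctl_write_check(const char *dirname, bool is_write)
{
        if (!strcmp(dirname, "grsecurity") && grsec_lock && is_write)
                return -1;      /* the kernel returns -EACCES and logs the name */
        return 0;
}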
55228 diff -urNp linux-3.0.7/grsecurity/grsec_time.c linux-3.0.7/grsecurity/grsec_time.c
55229 --- linux-3.0.7/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55230 +++ linux-3.0.7/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
55231 @@ -0,0 +1,16 @@
55232 +#include <linux/kernel.h>
55233 +#include <linux/sched.h>
55234 +#include <linux/grinternal.h>
55235 +#include <linux/module.h>
55236 +
55237 +void
55238 +gr_log_timechange(void)
55239 +{
55240 +#ifdef CONFIG_GRKERNSEC_TIME
55241 + if (grsec_enable_time)
55242 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55243 +#endif
55244 + return;
55245 +}
55246 +
55247 +EXPORT_SYMBOL(gr_log_timechange);
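gr_log_timechange() is a thin audit hook: it is compiled in only when CONFIG_GRKERNSEC_TIME is set and emits GR_TIME_MSG only while the grsec_enable_time toggle is non-zero. A hook like this is meant to be called from the code paths that change the system clock; the sketch below shows one plausible call pattern, with the function name and placement assumed rather than taken from the patch.

/*
 * Hypothetical call site, for illustration only: the function name and its
 * placement are assumptions, not lines taken from this patch.
 */
#include <linux/time.h>
#include <linux/security.h>
#include <linux/grinternal.h>

static long demo_set_clock(const struct timespec *ts)
{
	int err = security_settime(ts, NULL);

	if (err)
		return err;

	gr_log_timechange();            /* audit the change before applying it */
	return do_settimeofday(ts);
}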
55248 diff -urNp linux-3.0.7/grsecurity/grsec_tpe.c linux-3.0.7/grsecurity/grsec_tpe.c
55249 --- linux-3.0.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55250 +++ linux-3.0.7/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
55251 @@ -0,0 +1,39 @@
55252 +#include <linux/kernel.h>
55253 +#include <linux/sched.h>
55254 +#include <linux/file.h>
55255 +#include <linux/fs.h>
55256 +#include <linux/grinternal.h>
55257 +
55258 +extern int gr_acl_tpe_check(void);
55259 +
55260 +int
55261 +gr_tpe_allow(const struct file *file)
55262 +{
55263 +#ifdef CONFIG_GRKERNSEC
55264 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55265 + const struct cred *cred = current_cred();
55266 +
55267 + if (cred->uid && ((grsec_enable_tpe &&
55268 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55269 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55270 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55271 +#else
55272 + in_group_p(grsec_tpe_gid)
55273 +#endif
55274 + ) || gr_acl_tpe_check()) &&
55275 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55276 + (inode->i_mode & S_IWOTH))))) {
55277 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55278 + return 0;
55279 + }
55280 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55281 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55282 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55283 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55284 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55285 + return 0;
55286 + }
55287 +#endif
55288 +#endif
55289 + return 1;
55290 +}
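gr_tpe_allow() denies execution when the caller is non-root, is covered by the TPE group test (or, with TPE_INVERT, falls outside the trusted group), and the target's parent directory is either not owned by root or is group/world-writable; GRKERNSEC_TPE_ALL adds a further, weaker check for all non-root users. The userspace sketch below re-implements only the main decision for illustration; it is not kernel code from the patch, and the helper names are invented.

/*
 * Userspace sketch of the TPE decision implemented by gr_tpe_allow() above.
 * It mirrors only the policy logic; uid/gid/mode values are passed in
 * explicitly instead of being read from struct inode and struct cred.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Returns true if execution would be allowed. */
static bool tpe_allow(uid_t uid, bool in_tpe_group, bool tpe_invert,
		      uid_t dir_uid, mode_t dir_mode)
{
	bool untrusted = tpe_invert ? !in_tpe_group : in_tpe_group;
	bool dir_unsafe = (dir_uid != 0) || (dir_mode & (S_IWGRP | S_IWOTH));

	if (uid != 0 && untrusted && dir_unsafe)
		return false;     /* denied: untrusted user, writable or non-root dir */
	return true;
}

int main(void)
{
	/* Non-root user in the TPE group, binary in a world-writable dir: denied. */
	printf("%d\n", tpe_allow(1000, true, false, 0, 0777));
	/* Same user, binary under a root-owned 0755 directory: allowed. */
	printf("%d\n", tpe_allow(1000, true, false, 0, 0755));
	return 0;
}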
55291 diff -urNp linux-3.0.7/grsecurity/grsum.c linux-3.0.7/grsecurity/grsum.c
55292 --- linux-3.0.7/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55293 +++ linux-3.0.7/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
55294 @@ -0,0 +1,61 @@
55295 +#include <linux/err.h>
55296 +#include <linux/kernel.h>
55297 +#include <linux/sched.h>
55298 +#include <linux/mm.h>
55299 +#include <linux/scatterlist.h>
55300 +#include <linux/crypto.h>
55301 +#include <linux/gracl.h>
55302 +
55303 +
55304 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55305 +#error "crypto and sha256 must be built into the kernel"
55306 +#endif
55307 +
55308 +int
55309 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55310 +{
55311 + char *p;
55312 + struct crypto_hash *tfm;
55313 + struct hash_desc desc;
55314 + struct scatterlist sg;
55315 + unsigned char temp_sum[GR_SHA_LEN];
55316 + volatile int retval = 0;
55317 + volatile int dummy = 0;
55318 + unsigned int i;
55319 +
55320 + sg_init_table(&sg, 1);
55321 +
55322 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55323 + if (IS_ERR(tfm)) {
55324 + /* should never happen, since sha256 should be built in */
55325 + return 1;
55326 + }
55327 +
55328 + desc.tfm = tfm;
55329 + desc.flags = 0;
55330 +
55331 + crypto_hash_init(&desc);
55332 +
55333 + p = salt;
55334 + sg_set_buf(&sg, p, GR_SALT_LEN);
55335 + crypto_hash_update(&desc, &sg, sg.length);
55336 +
55337 + p = entry->pw;
55338 + sg_set_buf(&sg, p, strlen(p));
55339 +
55340 + crypto_hash_update(&desc, &sg, sg.length);
55341 +
55342 + crypto_hash_final(&desc, temp_sum);
55343 +
55344 + memset(entry->pw, 0, GR_PW_LEN);
55345 +
55346 + for (i = 0; i < GR_SHA_LEN; i++)
55347 + if (sum[i] != temp_sum[i])
55348 + retval = 1;
55349 + else
55350 + dummy = 1; // waste a cycle
55351 +
55352 + crypto_free_hash(tfm);
55353 +
55354 + return retval;
55355 +}
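chkpw() hashes the salt followed by the supplied password with SHA-256 and compares the result byte-by-byte against the stored sum, touching every byte (the dummy assignment) so a mismatch is not revealed early through timing. A userspace equivalent of the same salt-then-password construction, sketched here with OpenSSL rather than the in-kernel crypto API (the lengths are stand-ins for the kernel's GR_SALT_LEN/GR_SHA_LEN), would be:

/*
 * Userspace sketch of the salt || password SHA-256 check performed by
 * chkpw() above, using OpenSSL for the digest (build with -lcrypto).
 * SALT_LEN and SUM_LEN are stand-in constants, not the kernel's values.
 */
#include <string.h>
#include <openssl/sha.h>

#define SALT_LEN 16
#define SUM_LEN  32   /* SHA-256 digest size */

/* Returns 0 on match, 1 on mismatch -- same convention as chkpw(). */
int check_password(const unsigned char salt[SALT_LEN], const char *password,
		   const unsigned char expected[SUM_LEN])
{
	unsigned char digest[SUM_LEN];
	unsigned int i, diff = 0;
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, salt, SALT_LEN);
	SHA256_Update(&ctx, password, strlen(password));
	SHA256_Final(digest, &ctx);

	/* Compare every byte so timing does not reveal where the mismatch is. */
	for (i = 0; i < SUM_LEN; i++)
		diff |= digest[i] ^ expected[i];

	return diff != 0;
}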
55356 diff -urNp linux-3.0.7/grsecurity/Kconfig linux-3.0.7/grsecurity/Kconfig
55357 --- linux-3.0.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55358 +++ linux-3.0.7/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
55359 @@ -0,0 +1,1038 @@
55360 +#
55361 +# grsecurity configuration
55362 +#
55363 +
55364 +menu "Grsecurity"
55365 +
55366 +config GRKERNSEC
55367 + bool "Grsecurity"
55368 + select CRYPTO
55369 + select CRYPTO_SHA256
55370 + help
55371 + If you say Y here, you will be able to configure many features
55372 + that will enhance the security of your system. It is highly
55373 + recommended that you say Y here and read through the help
55374 + for each option so that you fully understand the features and
55375 + can evaluate their usefulness for your machine.
55376 +
55377 +choice
55378 + prompt "Security Level"
55379 + depends on GRKERNSEC
55380 + default GRKERNSEC_CUSTOM
55381 +
55382 +config GRKERNSEC_LOW
55383 + bool "Low"
55384 + select GRKERNSEC_LINK
55385 + select GRKERNSEC_FIFO
55386 + select GRKERNSEC_RANDNET
55387 + select GRKERNSEC_DMESG
55388 + select GRKERNSEC_CHROOT
55389 + select GRKERNSEC_CHROOT_CHDIR
55390 +
55391 + help
55392 + If you choose this option, several of the grsecurity options will
55393 + be enabled that will give you greater protection against a number
55394 +	  of attacks, while ensuring that none of your software will have any
55395 + conflicts with the additional security measures. If you run a lot
55396 + of unusual software, or you are having problems with the higher
55397 + security levels, you should say Y here. With this option, the
55398 + following features are enabled:
55399 +
55400 + - Linking restrictions
55401 + - FIFO restrictions
55402 + - Restricted dmesg
55403 + - Enforced chdir("/") on chroot
55404 + - Runtime module disabling
55405 +
55406 +config GRKERNSEC_MEDIUM
55407 + bool "Medium"
55408 + select PAX
55409 + select PAX_EI_PAX
55410 + select PAX_PT_PAX_FLAGS
55411 + select PAX_HAVE_ACL_FLAGS
55412 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55413 + select GRKERNSEC_CHROOT
55414 + select GRKERNSEC_CHROOT_SYSCTL
55415 + select GRKERNSEC_LINK
55416 + select GRKERNSEC_FIFO
55417 + select GRKERNSEC_DMESG
55418 + select GRKERNSEC_RANDNET
55419 + select GRKERNSEC_FORKFAIL
55420 + select GRKERNSEC_TIME
55421 + select GRKERNSEC_SIGNAL
55422 + select GRKERNSEC_CHROOT
55423 + select GRKERNSEC_CHROOT_UNIX
55424 + select GRKERNSEC_CHROOT_MOUNT
55425 + select GRKERNSEC_CHROOT_PIVOT
55426 + select GRKERNSEC_CHROOT_DOUBLE
55427 + select GRKERNSEC_CHROOT_CHDIR
55428 + select GRKERNSEC_CHROOT_MKNOD
55429 + select GRKERNSEC_PROC
55430 + select GRKERNSEC_PROC_USERGROUP
55431 + select PAX_RANDUSTACK
55432 + select PAX_ASLR
55433 + select PAX_RANDMMAP
55434 + select PAX_REFCOUNT if (X86 || SPARC64)
55435 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55436 +
55437 + help
55438 + If you say Y here, several features in addition to those included
55439 + in the low additional security level will be enabled. These
55440 + features provide even more security to your system, though in rare
55441 + cases they may be incompatible with very old or poorly written
55442 + software. If you enable this option, make sure that your auth
55443 + service (identd) is running as gid 1001. With this option,
55444 + the following features (in addition to those provided in the
55445 + low additional security level) will be enabled:
55446 +
55447 + - Failed fork logging
55448 + - Time change logging
55449 + - Signal logging
55450 + - Deny mounts in chroot
55451 + - Deny double chrooting
55452 + - Deny sysctl writes in chroot
55453 + - Deny mknod in chroot
55454 + - Deny access to abstract AF_UNIX sockets out of chroot
55455 + - Deny pivot_root in chroot
55456 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55457 + - /proc restrictions with special GID set to 10 (usually wheel)
55458 + - Address Space Layout Randomization (ASLR)
55459 + - Prevent exploitation of most refcount overflows
55460 + - Bounds checking of copying between the kernel and userland
55461 +
55462 +config GRKERNSEC_HIGH
55463 + bool "High"
55464 + select GRKERNSEC_LINK
55465 + select GRKERNSEC_FIFO
55466 + select GRKERNSEC_DMESG
55467 + select GRKERNSEC_FORKFAIL
55468 + select GRKERNSEC_TIME
55469 + select GRKERNSEC_SIGNAL
55470 + select GRKERNSEC_CHROOT
55471 + select GRKERNSEC_CHROOT_SHMAT
55472 + select GRKERNSEC_CHROOT_UNIX
55473 + select GRKERNSEC_CHROOT_MOUNT
55474 + select GRKERNSEC_CHROOT_FCHDIR
55475 + select GRKERNSEC_CHROOT_PIVOT
55476 + select GRKERNSEC_CHROOT_DOUBLE
55477 + select GRKERNSEC_CHROOT_CHDIR
55478 + select GRKERNSEC_CHROOT_MKNOD
55479 + select GRKERNSEC_CHROOT_CAPS
55480 + select GRKERNSEC_CHROOT_SYSCTL
55481 + select GRKERNSEC_CHROOT_FINDTASK
55482 + select GRKERNSEC_SYSFS_RESTRICT
55483 + select GRKERNSEC_PROC
55484 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55485 + select GRKERNSEC_HIDESYM
55486 + select GRKERNSEC_BRUTE
55487 + select GRKERNSEC_PROC_USERGROUP
55488 + select GRKERNSEC_KMEM
55489 + select GRKERNSEC_RESLOG
55490 + select GRKERNSEC_RANDNET
55491 + select GRKERNSEC_PROC_ADD
55492 + select GRKERNSEC_CHROOT_CHMOD
55493 + select GRKERNSEC_CHROOT_NICE
55494 + select GRKERNSEC_AUDIT_MOUNT
55495 + select GRKERNSEC_MODHARDEN if (MODULES)
55496 + select GRKERNSEC_HARDEN_PTRACE
55497 + select GRKERNSEC_VM86 if (X86_32)
55498 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55499 + select PAX
55500 + select PAX_RANDUSTACK
55501 + select PAX_ASLR
55502 + select PAX_RANDMMAP
55503 + select PAX_NOEXEC
55504 + select PAX_MPROTECT
55505 + select PAX_EI_PAX
55506 + select PAX_PT_PAX_FLAGS
55507 + select PAX_HAVE_ACL_FLAGS
55508 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55509 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55510 + select PAX_RANDKSTACK if (X86_TSC && X86)
55511 + select PAX_SEGMEXEC if (X86_32)
55512 + select PAX_PAGEEXEC
55513 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55514 + select PAX_EMUTRAMP if (PARISC)
55515 + select PAX_EMUSIGRT if (PARISC)
55516 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55517 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55518 + select PAX_REFCOUNT if (X86 || SPARC64)
55519 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
55520 + help
55521 + If you say Y here, many of the features of grsecurity will be
55522 + enabled, which will protect you against many kinds of attacks
55523 + against your system. The heightened security comes at a cost
55524 + of an increased chance of incompatibilities with rare software
55525 + on your machine. Since this security level enables PaX, you should
55526 + view <http://pax.grsecurity.net> and read about the PaX
55527 + project. While you are there, download chpax and run it on
55528 + binaries that cause problems with PaX. Also remember that
55529 + since the /proc restrictions are enabled, you must run your
55530 + identd as gid 1001. This security level enables the following
55531 + features in addition to those listed in the low and medium
55532 + security levels:
55533 +
55534 + - Additional /proc restrictions
55535 + - Chmod restrictions in chroot
55536 + - No signals, ptrace, or viewing of processes outside of chroot
55537 + - Capability restrictions in chroot
55538 + - Deny fchdir out of chroot
55539 + - Priority restrictions in chroot
55540 + - Segmentation-based implementation of PaX
55541 + - Mprotect restrictions
55542 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55543 + - Kernel stack randomization
55544 + - Mount/unmount/remount logging
55545 + - Kernel symbol hiding
55546 + - Prevention of memory exhaustion-based exploits
55547 + - Hardening of module auto-loading
55548 + - Ptrace restrictions
55549 + - Restricted vm86 mode
55550 + - Restricted sysfs/debugfs
55551 + - Active kernel exploit response
55552 +
55553 +config GRKERNSEC_CUSTOM
55554 + bool "Custom"
55555 + help
55556 + If you say Y here, you will be able to configure every grsecurity
55557 + option, which allows you to enable many more features that aren't
55558 + covered in the basic security levels. These additional features
55559 + include TPE, socket restrictions, and the sysctl system for
55560 + grsecurity. It is advised that you read through the help for
55561 + each option to determine its usefulness in your situation.
55562 +
55563 +endchoice
55564 +
55565 +menu "Address Space Protection"
55566 +depends on GRKERNSEC
55567 +
55568 +config GRKERNSEC_KMEM
55569 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55570 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55571 + help
55572 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55573 + be written to via mmap or otherwise to modify the running kernel.
55574 + /dev/port will also not be allowed to be opened. If you have module
55575 + support disabled, enabling this will close up four ways that are
55576 + currently used to insert malicious code into the running kernel.
55577 + Even with all these features enabled, we still highly recommend that
55578 + you use the RBAC system, as it is still possible for an attacker to
55579 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55580 + If you are not using XFree86, you may be able to stop this additional
55581 + case by enabling the 'Disable privileged I/O' option. Though nothing
55582 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55583 + but only to video memory, which is the only writing we allow in this
55584 +	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
55585 +	  will not be allowed to be changed to PROT_WRITE later via mprotect.
55586 + It is highly recommended that you say Y here if you meet all the
55587 + conditions above.
55588 +
55589 +config GRKERNSEC_VM86
55590 + bool "Restrict VM86 mode"
55591 + depends on X86_32
55592 +
55593 + help
55594 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55595 + make use of a special execution mode on 32bit x86 processors called
55596 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55597 + video cards and will still work with this option enabled. The purpose
55598 + of the option is to prevent exploitation of emulation errors in
55599 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55600 + Nearly all users should be able to enable this option.
55601 +
55602 +config GRKERNSEC_IO
55603 + bool "Disable privileged I/O"
55604 + depends on X86
55605 + select RTC_CLASS
55606 + select RTC_INTF_DEV
55607 + select RTC_DRV_CMOS
55608 +
55609 + help
55610 + If you say Y here, all ioperm and iopl calls will return an error.
55611 + Ioperm and iopl can be used to modify the running kernel.
55612 + Unfortunately, some programs need this access to operate properly,
55613 + the most notable of which are XFree86 and hwclock. hwclock can be
55614 + remedied by having RTC support in the kernel, so real-time
55615 + clock support is enabled if this option is enabled, to ensure
55616 + that hwclock operates correctly. XFree86 still will not
55617 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55618 + IF YOU USE XFree86. If you use XFree86 and you still want to
55619 + protect your kernel against modification, use the RBAC system.
55620 +
55621 +config GRKERNSEC_PROC_MEMMAP
55622 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55623 + default y if (PAX_NOEXEC || PAX_ASLR)
55624 + depends on PAX_NOEXEC || PAX_ASLR
55625 + help
55626 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55627 +	  give no information about the addresses of the task's mappings if
55628 +	  PaX features that rely on random addresses are enabled for the task.
55629 + If you use PaX it is greatly recommended that you say Y here as it
55630 + closes up a hole that makes the full ASLR useless for suid
55631 + binaries.
55632 +
55633 +config GRKERNSEC_BRUTE
55634 + bool "Deter exploit bruteforcing"
55635 + help
55636 + If you say Y here, attempts to bruteforce exploits against forking
55637 +	  daemons such as apache or sshd, as well as against suid/sgid binaries,
55638 + will be deterred. When a child of a forking daemon is killed by PaX
55639 + or crashes due to an illegal instruction or other suspicious signal,
55640 + the parent process will be delayed 30 seconds upon every subsequent
55641 + fork until the administrator is able to assess the situation and
55642 + restart the daemon.
55643 + In the suid/sgid case, the attempt is logged, the user has all their
55644 + processes terminated, and they are prevented from executing any further
55645 + processes for 15 minutes.
55646 + It is recommended that you also enable signal logging in the auditing
55647 + section so that logs are generated when a process triggers a suspicious
55648 + signal.
55649 + If the sysctl option is enabled, a sysctl option with name
55650 + "deter_bruteforce" is created.
55651 +
55652 +
55653 +config GRKERNSEC_MODHARDEN
55654 + bool "Harden module auto-loading"
55655 + depends on MODULES
55656 + help
55657 + If you say Y here, module auto-loading in response to use of some
55658 + feature implemented by an unloaded module will be restricted to
55659 + root users. Enabling this option helps defend against attacks
55660 + by unprivileged users who abuse the auto-loading behavior to
55661 + cause a vulnerable module to load that is then exploited.
55662 +
55663 + If this option prevents a legitimate use of auto-loading for a
55664 + non-root user, the administrator can execute modprobe manually
55665 + with the exact name of the module mentioned in the alert log.
55666 + Alternatively, the administrator can add the module to the list
55667 + of modules loaded at boot by modifying init scripts.
55668 +
55669 + Modification of init scripts will most likely be needed on
55670 + Ubuntu servers with encrypted home directory support enabled,
55671 + as the first non-root user logging in will cause the ecb(aes),
55672 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55673 +
55674 +config GRKERNSEC_HIDESYM
55675 + bool "Hide kernel symbols"
55676 + help
55677 + If you say Y here, getting information on loaded modules, and
55678 + displaying all kernel symbols through a syscall will be restricted
55679 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55680 + /proc/kallsyms will be restricted to the root user. The RBAC
55681 + system can hide that entry even from root.
55682 +
55683 + This option also prevents leaking of kernel addresses through
55684 + several /proc entries.
55685 +
55686 + Note that this option is only effective provided the following
55687 + conditions are met:
55688 + 1) The kernel using grsecurity is not precompiled by some distribution
55689 + 2) You have also enabled GRKERNSEC_DMESG
55690 + 3) You are using the RBAC system and hiding other files such as your
55691 + kernel image and System.map. Alternatively, enabling this option
55692 + causes the permissions on /boot, /lib/modules, and the kernel
55693 + source directory to change at compile time to prevent
55694 + reading by non-root users.
55695 + If the above conditions are met, this option will aid in providing a
55696 + useful protection against local kernel exploitation of overflows
55697 + and arbitrary read/write vulnerabilities.
55698 +
55699 +config GRKERNSEC_KERN_LOCKOUT
55700 + bool "Active kernel exploit response"
55701 + depends on X86 || ARM || PPC || SPARC
55702 + help
55703 + If you say Y here, when a PaX alert is triggered due to suspicious
55704 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55705 +	  or an oops occurs due to bad memory accesses, instead of just
55706 + terminating the offending process (and potentially allowing
55707 + a subsequent exploit from the same user), we will take one of two
55708 + actions:
55709 + If the user was root, we will panic the system
55710 + If the user was non-root, we will log the attempt, terminate
55711 + all processes owned by the user, then prevent them from creating
55712 + any new processes until the system is restarted
55713 + This deters repeated kernel exploitation/bruteforcing attempts
55714 + and is useful for later forensics.
55715 +
55716 +endmenu
55717 +menu "Role Based Access Control Options"
55718 +depends on GRKERNSEC
55719 +
55720 +config GRKERNSEC_RBAC_DEBUG
55721 + bool
55722 +
55723 +config GRKERNSEC_NO_RBAC
55724 + bool "Disable RBAC system"
55725 + help
55726 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55727 + preventing the RBAC system from being enabled. You should only say Y
55728 + here if you have no intention of using the RBAC system, so as to prevent
55729 + an attacker with root access from misusing the RBAC system to hide files
55730 + and processes when loadable module support and /dev/[k]mem have been
55731 + locked down.
55732 +
55733 +config GRKERNSEC_ACL_HIDEKERN
55734 + bool "Hide kernel processes"
55735 + help
55736 +	  If you say Y here, all kernel threads will be hidden from all
55737 + processes but those whose subject has the "view hidden processes"
55738 + flag.
55739 +
55740 +config GRKERNSEC_ACL_MAXTRIES
55741 + int "Maximum tries before password lockout"
55742 + default 3
55743 + help
55744 + This option enforces the maximum number of times a user can attempt
55745 + to authorize themselves with the grsecurity RBAC system before being
55746 + denied the ability to attempt authorization again for a specified time.
55747 + The lower the number, the harder it will be to brute-force a password.
55748 +
55749 +config GRKERNSEC_ACL_TIMEOUT
55750 + int "Time to wait after max password tries, in seconds"
55751 + default 30
55752 + help
55753 + This option specifies the time the user must wait after attempting to
55754 + authorize to the RBAC system with the maximum number of invalid
55755 + passwords. The higher the number, the harder it will be to brute-force
55756 + a password.
55757 +
55758 +endmenu
55759 +menu "Filesystem Protections"
55760 +depends on GRKERNSEC
55761 +
55762 +config GRKERNSEC_PROC
55763 + bool "Proc restrictions"
55764 + help
55765 + If you say Y here, the permissions of the /proc filesystem
55766 + will be altered to enhance system security and privacy. You MUST
55767 + choose either a user only restriction or a user and group restriction.
55768 + Depending upon the option you choose, you can either restrict users to
55769 + see only the processes they themselves run, or choose a group that can
55770 + view all processes and files normally restricted to root if you choose
55771 + the "restrict to user only" option. NOTE: If you're running identd as
55772 + a non-root user, you will have to run it as the group you specify here.
55773 +
55774 +config GRKERNSEC_PROC_USER
55775 + bool "Restrict /proc to user only"
55776 + depends on GRKERNSEC_PROC
55777 + help
55778 + If you say Y here, non-root users will only be able to view their own
55779 +	  processes, and will be restricted from viewing network-related information
55780 +	  and kernel symbol and module information.
55781 +
55782 +config GRKERNSEC_PROC_USERGROUP
55783 + bool "Allow special group"
55784 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55785 + help
55786 + If you say Y here, you will be able to select a group that will be
55787 + able to view all processes and network-related information. If you've
55788 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55789 + remain hidden. This option is useful if you want to run identd as
55790 + a non-root user.
55791 +
55792 +config GRKERNSEC_PROC_GID
55793 + int "GID for special group"
55794 + depends on GRKERNSEC_PROC_USERGROUP
55795 + default 1001
55796 +
55797 +config GRKERNSEC_PROC_ADD
55798 + bool "Additional restrictions"
55799 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55800 + help
55801 + If you say Y here, additional restrictions will be placed on
55802 + /proc that keep normal users from viewing device information and
55803 + slabinfo information that could be useful for exploits.
55804 +
55805 +config GRKERNSEC_LINK
55806 + bool "Linking restrictions"
55807 + help
55808 + If you say Y here, /tmp race exploits will be prevented, since users
55809 + will no longer be able to follow symlinks owned by other users in
55810 + world-writable +t directories (e.g. /tmp), unless the owner of the
55811 +	  symlink is the owner of the directory. Users will also not be
55812 + able to hardlink to files they do not own. If the sysctl option is
55813 + enabled, a sysctl option with name "linking_restrictions" is created.
55814 +
55815 +config GRKERNSEC_FIFO
55816 + bool "FIFO restrictions"
55817 + help
55818 + If you say Y here, users will not be able to write to FIFOs they don't
55819 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55820 +	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
55821 + option is enabled, a sysctl option with name "fifo_restrictions" is
55822 + created.
55823 +
55824 +config GRKERNSEC_SYSFS_RESTRICT
55825 + bool "Sysfs/debugfs restriction"
55826 + depends on SYSFS
55827 + help
55828 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55829 + any filesystem normally mounted under it (e.g. debugfs) will only
55830 + be accessible by root. These filesystems generally provide access
55831 + to hardware and debug information that isn't appropriate for unprivileged
55832 + users of the system. Sysfs and debugfs have also become a large source
55833 + of new vulnerabilities, ranging from infoleaks to local compromise.
55834 + There has been very little oversight with an eye toward security involved
55835 + in adding new exporters of information to these filesystems, so their
55836 + use is discouraged.
55837 + This option is equivalent to a chmod 0700 of the mount paths.
55838 +
55839 +config GRKERNSEC_ROFS
55840 + bool "Runtime read-only mount protection"
55841 + help
55842 + If you say Y here, a sysctl option with name "romount_protect" will
55843 + be created. By setting this option to 1 at runtime, filesystems
55844 + will be protected in the following ways:
55845 + * No new writable mounts will be allowed
55846 + * Existing read-only mounts won't be able to be remounted read/write
55847 + * Write operations will be denied on all block devices
55848 + This option acts independently of grsec_lock: once it is set to 1,
55849 + it cannot be turned off. Therefore, please be mindful of the resulting
55850 + behavior if this option is enabled in an init script on a read-only
55851 + filesystem. This feature is mainly intended for secure embedded systems.
55852 +
55853 +config GRKERNSEC_CHROOT
55854 + bool "Chroot jail restrictions"
55855 + help
55856 + If you say Y here, you will be able to choose several options that will
55857 + make breaking out of a chrooted jail much more difficult. If you
55858 + encounter no software incompatibilities with the following options, it
55859 + is recommended that you enable each one.
55860 +
55861 +config GRKERNSEC_CHROOT_MOUNT
55862 + bool "Deny mounts"
55863 + depends on GRKERNSEC_CHROOT
55864 + help
55865 + If you say Y here, processes inside a chroot will not be able to
55866 + mount or remount filesystems. If the sysctl option is enabled, a
55867 + sysctl option with name "chroot_deny_mount" is created.
55868 +
55869 +config GRKERNSEC_CHROOT_DOUBLE
55870 + bool "Deny double-chroots"
55871 + depends on GRKERNSEC_CHROOT
55872 + help
55873 + If you say Y here, processes inside a chroot will not be able to chroot
55874 + again outside the chroot. This is a widely used method of breaking
55875 + out of a chroot jail and should not be allowed. If the sysctl
55876 + option is enabled, a sysctl option with name
55877 + "chroot_deny_chroot" is created.
55878 +
55879 +config GRKERNSEC_CHROOT_PIVOT
55880 + bool "Deny pivot_root in chroot"
55881 + depends on GRKERNSEC_CHROOT
55882 + help
55883 + If you say Y here, processes inside a chroot will not be able to use
55884 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55885 + works similar to chroot in that it changes the root filesystem. This
55886 + function could be misused in a chrooted process to attempt to break out
55887 + of the chroot, and therefore should not be allowed. If the sysctl
55888 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55889 + created.
55890 +
55891 +config GRKERNSEC_CHROOT_CHDIR
55892 + bool "Enforce chdir(\"/\") on all chroots"
55893 + depends on GRKERNSEC_CHROOT
55894 + help
55895 + If you say Y here, the current working directory of all newly-chrooted
55896 +	  applications will be set to the root directory of the chroot.
55897 + The man page on chroot(2) states:
55898 + Note that this call does not change the current working
55899 + directory, so that `.' can be outside the tree rooted at
55900 + `/'. In particular, the super-user can escape from a
55901 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55902 +
55903 + It is recommended that you say Y here, since it's not known to break
55904 + any software. If the sysctl option is enabled, a sysctl option with
55905 + name "chroot_enforce_chdir" is created.
55906 +
55907 +config GRKERNSEC_CHROOT_CHMOD
55908 + bool "Deny (f)chmod +s"
55909 + depends on GRKERNSEC_CHROOT
55910 + help
55911 + If you say Y here, processes inside a chroot will not be able to chmod
55912 + or fchmod files to make them have suid or sgid bits. This protects
55913 + against another published method of breaking a chroot. If the sysctl
55914 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55915 + created.
55916 +
55917 +config GRKERNSEC_CHROOT_FCHDIR
55918 + bool "Deny fchdir out of chroot"
55919 + depends on GRKERNSEC_CHROOT
55920 + help
55921 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55922 + to a file descriptor of the chrooting process that points to a directory
55923 +	  outside of the chroot will be stopped. If the sysctl option
55924 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55925 +
55926 +config GRKERNSEC_CHROOT_MKNOD
55927 + bool "Deny mknod"
55928 + depends on GRKERNSEC_CHROOT
55929 + help
55930 + If you say Y here, processes inside a chroot will not be allowed to
55931 + mknod. The problem with using mknod inside a chroot is that it
55932 + would allow an attacker to create a device entry that is the same
55933 +	  as one on the physical root of your system, which could be anything
55934 +	  from the console device to a device for your hard drive (which
55935 + they could then use to wipe the drive or steal data). It is recommended
55936 + that you say Y here, unless you run into software incompatibilities.
55937 + If the sysctl option is enabled, a sysctl option with name
55938 + "chroot_deny_mknod" is created.
55939 +
55940 +config GRKERNSEC_CHROOT_SHMAT
55941 + bool "Deny shmat() out of chroot"
55942 + depends on GRKERNSEC_CHROOT
55943 + help
55944 + If you say Y here, processes inside a chroot will not be able to attach
55945 + to shared memory segments that were created outside of the chroot jail.
55946 + It is recommended that you say Y here. If the sysctl option is enabled,
55947 + a sysctl option with name "chroot_deny_shmat" is created.
55948 +
55949 +config GRKERNSEC_CHROOT_UNIX
55950 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55951 + depends on GRKERNSEC_CHROOT
55952 + help
55953 + If you say Y here, processes inside a chroot will not be able to
55954 + connect to abstract (meaning not belonging to a filesystem) Unix
55955 + domain sockets that were bound outside of a chroot. It is recommended
55956 + that you say Y here. If the sysctl option is enabled, a sysctl option
55957 + with name "chroot_deny_unix" is created.
55958 +
55959 +config GRKERNSEC_CHROOT_FINDTASK
55960 + bool "Protect outside processes"
55961 + depends on GRKERNSEC_CHROOT
55962 + help
55963 + If you say Y here, processes inside a chroot will not be able to
55964 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55965 + getsid, or view any process outside of the chroot. If the sysctl
55966 + option is enabled, a sysctl option with name "chroot_findtask" is
55967 + created.
55968 +
55969 +config GRKERNSEC_CHROOT_NICE
55970 + bool "Restrict priority changes"
55971 + depends on GRKERNSEC_CHROOT
55972 + help
55973 + If you say Y here, processes inside a chroot will not be able to raise
55974 + the priority of processes in the chroot, or alter the priority of
55975 + processes outside the chroot. This provides more security than simply
55976 + removing CAP_SYS_NICE from the process' capability set. If the
55977 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55978 + is created.
55979 +
55980 +config GRKERNSEC_CHROOT_SYSCTL
55981 + bool "Deny sysctl writes"
55982 + depends on GRKERNSEC_CHROOT
55983 + help
55984 + If you say Y here, an attacker in a chroot will not be able to
55985 + write to sysctl entries, either by sysctl(2) or through a /proc
55986 + interface. It is strongly recommended that you say Y here. If the
55987 + sysctl option is enabled, a sysctl option with name
55988 + "chroot_deny_sysctl" is created.
55989 +
55990 +config GRKERNSEC_CHROOT_CAPS
55991 + bool "Capability restrictions"
55992 + depends on GRKERNSEC_CHROOT
55993 + help
55994 + If you say Y here, the capabilities on all processes within a
55995 + chroot jail will be lowered to stop module insertion, raw i/o,
55996 + system and net admin tasks, rebooting the system, modifying immutable
55997 + files, modifying IPC owned by another, and changing the system time.
55998 +	  This is left as an option because it can break some apps. Disable this
55999 + if your chrooted apps are having problems performing those kinds of
56000 + tasks. If the sysctl option is enabled, a sysctl option with
56001 + name "chroot_caps" is created.
56002 +
56003 +endmenu
56004 +menu "Kernel Auditing"
56005 +depends on GRKERNSEC
56006 +
56007 +config GRKERNSEC_AUDIT_GROUP
56008 + bool "Single group for auditing"
56009 + help
56010 + If you say Y here, the exec, chdir, and (un)mount logging features
56011 + will only operate on a group you specify. This option is recommended
56012 + if you only want to watch certain users instead of having a large
56013 + amount of logs from the entire system. If the sysctl option is enabled,
56014 + a sysctl option with name "audit_group" is created.
56015 +
56016 +config GRKERNSEC_AUDIT_GID
56017 + int "GID for auditing"
56018 + depends on GRKERNSEC_AUDIT_GROUP
56019 + default 1007
56020 +
56021 +config GRKERNSEC_EXECLOG
56022 + bool "Exec logging"
56023 + help
56024 + If you say Y here, all execve() calls will be logged (since the
56025 + other exec*() calls are frontends to execve(), all execution
56026 + will be logged). Useful for shell-servers that like to keep track
56027 + of their users. If the sysctl option is enabled, a sysctl option with
56028 + name "exec_logging" is created.
56029 +	  WARNING: This option, when enabled, will produce a LOT of logs, especially
56030 + on an active system.
56031 +
56032 +config GRKERNSEC_RESLOG
56033 + bool "Resource logging"
56034 + help
56035 + If you say Y here, all attempts to overstep resource limits will
56036 + be logged with the resource name, the requested size, and the current
56037 + limit. It is highly recommended that you say Y here. If the sysctl
56038 + option is enabled, a sysctl option with name "resource_logging" is
56039 + created. If the RBAC system is enabled, the sysctl value is ignored.
56040 +
56041 +config GRKERNSEC_CHROOT_EXECLOG
56042 + bool "Log execs within chroot"
56043 + help
56044 + If you say Y here, all executions inside a chroot jail will be logged
56045 + to syslog. This can cause a large amount of logs if certain
56046 +	  applications (e.g. djb's daemontools) are installed on the system, and
56047 + is therefore left as an option. If the sysctl option is enabled, a
56048 + sysctl option with name "chroot_execlog" is created.
56049 +
56050 +config GRKERNSEC_AUDIT_PTRACE
56051 + bool "Ptrace logging"
56052 + help
56053 + If you say Y here, all attempts to attach to a process via ptrace
56054 + will be logged. If the sysctl option is enabled, a sysctl option
56055 + with name "audit_ptrace" is created.
56056 +
56057 +config GRKERNSEC_AUDIT_CHDIR
56058 + bool "Chdir logging"
56059 + help
56060 + If you say Y here, all chdir() calls will be logged. If the sysctl
56061 + option is enabled, a sysctl option with name "audit_chdir" is created.
56062 +
56063 +config GRKERNSEC_AUDIT_MOUNT
56064 + bool "(Un)Mount logging"
56065 + help
56066 + If you say Y here, all mounts and unmounts will be logged. If the
56067 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56068 + created.
56069 +
56070 +config GRKERNSEC_SIGNAL
56071 + bool "Signal logging"
56072 + help
56073 + If you say Y here, certain important signals will be logged, such as
56074 +	  SIGSEGV, which will as a result inform you when an error in a program
56075 + occurred, which in some cases could mean a possible exploit attempt.
56076 + If the sysctl option is enabled, a sysctl option with name
56077 + "signal_logging" is created.
56078 +
56079 +config GRKERNSEC_FORKFAIL
56080 + bool "Fork failure logging"
56081 + help
56082 + If you say Y here, all failed fork() attempts will be logged.
56083 + This could suggest a fork bomb, or someone attempting to overstep
56084 + their process limit. If the sysctl option is enabled, a sysctl option
56085 + with name "forkfail_logging" is created.
56086 +
56087 +config GRKERNSEC_TIME
56088 + bool "Time change logging"
56089 + help
56090 + If you say Y here, any changes of the system clock will be logged.
56091 + If the sysctl option is enabled, a sysctl option with name
56092 + "timechange_logging" is created.
56093 +
56094 +config GRKERNSEC_PROC_IPADDR
56095 + bool "/proc/<pid>/ipaddr support"
56096 + help
56097 + If you say Y here, a new entry will be added to each /proc/<pid>
56098 + directory that contains the IP address of the person using the task.
56099 + The IP is carried across local TCP and AF_UNIX stream sockets.
56100 + This information can be useful for IDS/IPSes to perform remote response
56101 + to a local attack. The entry is readable by only the owner of the
56102 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56103 + the RBAC system), and thus does not create privacy concerns.
56104 +
56105 +config GRKERNSEC_RWXMAP_LOG
56106 + bool 'Denied RWX mmap/mprotect logging'
56107 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56108 + help
56109 + If you say Y here, calls to mmap() and mprotect() with explicit
56110 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56111 + denied by the PAX_MPROTECT feature. If the sysctl option is
56112 + enabled, a sysctl option with name "rwxmap_logging" is created.
56113 +
56114 +config GRKERNSEC_AUDIT_TEXTREL
56115 + bool 'ELF text relocations logging (READ HELP)'
56116 + depends on PAX_MPROTECT
56117 + help
56118 + If you say Y here, text relocations will be logged with the filename
56119 + of the offending library or binary. The purpose of the feature is
56120 + to help Linux distribution developers get rid of libraries and
56121 + binaries that need text relocations which hinder the future progress
56122 + of PaX. Only Linux distribution developers should say Y here, and
56123 + never on a production machine, as this option creates an information
56124 + leak that could aid an attacker in defeating the randomization of
56125 + a single memory region. If the sysctl option is enabled, a sysctl
56126 + option with name "audit_textrel" is created.
56127 +
56128 +endmenu
56129 +
56130 +menu "Executable Protections"
56131 +depends on GRKERNSEC
56132 +
56133 +config GRKERNSEC_DMESG
56134 + bool "Dmesg(8) restriction"
56135 + help
56136 + If you say Y here, non-root users will not be able to use dmesg(8)
56137 + to view up to the last 4kb of messages in the kernel's log buffer.
56138 + The kernel's log buffer often contains kernel addresses and other
56139 + identifying information useful to an attacker in fingerprinting a
56140 + system for a targeted exploit.
56141 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56142 + created.
56143 +
56144 +config GRKERNSEC_HARDEN_PTRACE
56145 + bool "Deter ptrace-based process snooping"
56146 + help
56147 + If you say Y here, TTY sniffers and other malicious monitoring
56148 + programs implemented through ptrace will be defeated. If you
56149 + have been using the RBAC system, this option has already been
56150 + enabled for several years for all users, with the ability to make
56151 + fine-grained exceptions.
56152 +
56153 + This option only affects the ability of non-root users to ptrace
56154 + processes that are not a descendent of the ptracing process.
56155 + This means that strace ./binary and gdb ./binary will still work,
56156 + but attaching to arbitrary processes will not. If the sysctl
56157 + option is enabled, a sysctl option with name "harden_ptrace" is
56158 + created.
56159 +
56160 +config GRKERNSEC_TPE
56161 + bool "Trusted Path Execution (TPE)"
56162 + help
56163 + If you say Y here, you will be able to choose a gid to add to the
56164 + supplementary groups of users you want to mark as "untrusted."
56165 + These users will not be able to execute any files that are not in
56166 + root-owned directories writable only by root. If the sysctl option
56167 + is enabled, a sysctl option with name "tpe" is created.
56168 +
56169 +config GRKERNSEC_TPE_ALL
56170 + bool "Partially restrict all non-root users"
56171 + depends on GRKERNSEC_TPE
56172 + help
56173 + If you say Y here, all non-root users will be covered under
56174 + a weaker TPE restriction. This is separate from, and in addition to,
56175 + the main TPE options that you have selected elsewhere. Thus, if a
56176 + "trusted" GID is chosen, this restriction applies to even that GID.
56177 + Under this restriction, all non-root users will only be allowed to
56178 + execute files in directories they own that are not group or
56179 + world-writable, or in directories owned by root and writable only by
56180 + root. If the sysctl option is enabled, a sysctl option with name
56181 + "tpe_restrict_all" is created.
56182 +
56183 +config GRKERNSEC_TPE_INVERT
56184 + bool "Invert GID option"
56185 + depends on GRKERNSEC_TPE
56186 + help
56187 + If you say Y here, the group you specify in the TPE configuration will
56188 + decide what group TPE restrictions will be *disabled* for. This
56189 + option is useful if you want TPE restrictions to be applied to most
56190 + users on the system. If the sysctl option is enabled, a sysctl option
56191 + with name "tpe_invert" is created. Unlike other sysctl options, this
56192 + entry will default to on for backward-compatibility.
56193 +
56194 +config GRKERNSEC_TPE_GID
56195 + int "GID for untrusted users"
56196 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56197 + default 1005
56198 + help
56199 + Setting this GID determines what group TPE restrictions will be
56200 + *enabled* for. If the sysctl option is enabled, a sysctl option
56201 + with name "tpe_gid" is created.
56202 +
56203 +config GRKERNSEC_TPE_GID
56204 + int "GID for trusted users"
56205 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56206 + default 1005
56207 + help
56208 + Setting this GID determines what group TPE restrictions will be
56209 + *disabled* for. If the sysctl option is enabled, a sysctl option
56210 + with name "tpe_gid" is created.
56211 +
56212 +endmenu
56213 +menu "Network Protections"
56214 +depends on GRKERNSEC
56215 +
56216 +config GRKERNSEC_RANDNET
56217 + bool "Larger entropy pools"
56218 + help
56219 + If you say Y here, the entropy pools used for many features of Linux
56220 + and grsecurity will be doubled in size. Since several grsecurity
56221 + features use additional randomness, it is recommended that you say Y
56222 + here. Saying Y here has a similar effect as modifying
56223 + /proc/sys/kernel/random/poolsize.
56224 +
56225 +config GRKERNSEC_BLACKHOLE
56226 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56227 + depends on NET
56228 + help
56229 + If you say Y here, neither TCP resets nor ICMP
56230 + destination-unreachable packets will be sent in response to packets
56231 + sent to ports for which no associated listening process exists.
56232 +	  This feature supports both IPv4 and IPv6 and exempts the
56233 + loopback interface from blackholing. Enabling this feature
56234 + makes a host more resilient to DoS attacks and reduces network
56235 + visibility against scanners.
56236 +
56237 + The blackhole feature as-implemented is equivalent to the FreeBSD
56238 + blackhole feature, as it prevents RST responses to all packets, not
56239 + just SYNs. Under most application behavior this causes no
56240 + problems, but applications (like haproxy) may not close certain
56241 + connections in a way that cleanly terminates them on the remote
56242 + end, leaving the remote host in LAST_ACK state. Because of this
56243 + side-effect and to prevent intentional LAST_ACK DoSes, this
56244 + feature also adds automatic mitigation against such attacks.
56245 + The mitigation drastically reduces the amount of time a socket
56246 + can spend in LAST_ACK state. If you're using haproxy and not
56247 + all servers it connects to have this option enabled, consider
56248 + disabling this feature on the haproxy host.
56249 +
56250 + If the sysctl option is enabled, two sysctl options with names
56251 + "ip_blackhole" and "lastack_retries" will be created.
56252 + While "ip_blackhole" takes the standard zero/non-zero on/off
56253 + toggle, "lastack_retries" uses the same kinds of values as
56254 + "tcp_retries1" and "tcp_retries2". The default value of 4
56255 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56256 + state.
56257 +
56258 +config GRKERNSEC_SOCKET
56259 + bool "Socket restrictions"
56260 + depends on NET
56261 + help
56262 + If you say Y here, you will be able to choose from several options.
56263 + If you assign a GID on your system and add it to the supplementary
56264 + groups of users you want to restrict socket access to, this patch
56265 + will perform up to three things, based on the option(s) you choose.
56266 +
56267 +config GRKERNSEC_SOCKET_ALL
56268 + bool "Deny any sockets to group"
56269 + depends on GRKERNSEC_SOCKET
56270 + help
56271 +	  If you say Y here, you will be able to choose a GID whose users will
56272 + be unable to connect to other hosts from your machine or run server
56273 + applications from your machine. If the sysctl option is enabled, a
56274 + sysctl option with name "socket_all" is created.
56275 +
56276 +config GRKERNSEC_SOCKET_ALL_GID
56277 + int "GID to deny all sockets for"
56278 + depends on GRKERNSEC_SOCKET_ALL
56279 + default 1004
56280 + help
56281 + Here you can choose the GID to disable socket access for. Remember to
56282 + add the users you want socket access disabled for to the GID
56283 + specified here. If the sysctl option is enabled, a sysctl option
56284 + with name "socket_all_gid" is created.
56285 +
56286 +config GRKERNSEC_SOCKET_CLIENT
56287 + bool "Deny client sockets to group"
56288 + depends on GRKERNSEC_SOCKET
56289 + help
56290 +	  If you say Y here, you will be able to choose a GID whose users will
56291 + be unable to connect to other hosts from your machine, but will be
56292 + able to run servers. If this option is enabled, all users in the group
56293 + you specify will have to use passive mode when initiating ftp transfers
56294 + from the shell on your machine. If the sysctl option is enabled, a
56295 + sysctl option with name "socket_client" is created.
56296 +
56297 +config GRKERNSEC_SOCKET_CLIENT_GID
56298 + int "GID to deny client sockets for"
56299 + depends on GRKERNSEC_SOCKET_CLIENT
56300 + default 1003
56301 + help
56302 + Here you can choose the GID to disable client socket access for.
56303 + Remember to add the users you want client socket access disabled for to
56304 + the GID specified here. If the sysctl option is enabled, a sysctl
56305 + option with name "socket_client_gid" is created.
56306 +
56307 +config GRKERNSEC_SOCKET_SERVER
56308 + bool "Deny server sockets to group"
56309 + depends on GRKERNSEC_SOCKET
56310 + help
56311 +	  If you say Y here, you will be able to choose a GID whose users will
56312 + be unable to run server applications from your machine. If the sysctl
56313 + option is enabled, a sysctl option with name "socket_server" is created.
56314 +
56315 +config GRKERNSEC_SOCKET_SERVER_GID
56316 + int "GID to deny server sockets for"
56317 + depends on GRKERNSEC_SOCKET_SERVER
56318 + default 1002
56319 + help
56320 + Here you can choose the GID to disable server socket access for.
56321 + Remember to add the users you want server socket access disabled for to
56322 + the GID specified here. If the sysctl option is enabled, a sysctl
56323 + option with name "socket_server_gid" is created.
56324 +
56325 +endmenu
56326 +menu "Sysctl support"
56327 +depends on GRKERNSEC && SYSCTL
56328 +
56329 +config GRKERNSEC_SYSCTL
56330 + bool "Sysctl support"
56331 + help
56332 + If you say Y here, you will be able to change the options that
56333 + grsecurity runs with at bootup, without having to recompile your
56334 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56335 + to enable (1) or disable (0) various features. All the sysctl entries
56336 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56337 + All features enabled in the kernel configuration are disabled at boot
56338 + if you do not say Y to the "Turn on features by default" option.
56339 + All options should be set at startup, and the grsec_lock entry should
56340 + be set to a non-zero value after all the options are set.
56341 + *THIS IS EXTREMELY IMPORTANT*
56342 +
56343 +config GRKERNSEC_SYSCTL_DISTRO
56344 + bool "Extra sysctl support for distro makers (READ HELP)"
56345 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56346 + help
56347 + If you say Y here, additional sysctl options will be created
56348 + for features that affect processes running as root. Therefore,
56349 + it is critical when using this option that the grsec_lock entry be
56350 +	  enabled after boot. Only distros that ship prebuilt kernel packages
56351 +	  with this option enabled and that can ensure grsec_lock is set
56352 +	  after boot should use this option.
56353 + *Failure to set grsec_lock after boot makes all grsec features
56354 + this option covers useless*
56355 +
56356 + Currently this option creates the following sysctl entries:
56357 + "Disable Privileged I/O": "disable_priv_io"
56358 +
56359 +config GRKERNSEC_SYSCTL_ON
56360 + bool "Turn on features by default"
56361 + depends on GRKERNSEC_SYSCTL
56362 + help
56363 +	  If you say Y here, the features enabled in the kernel configuration
56364 +	  will be turned on at boot time instead of starting out disabled.
56365 +	  It is recommended you say Y here unless
56366 + there is some reason you would want all sysctl-tunable features to
56367 + be disabled by default. As mentioned elsewhere, it is important
56368 + to enable the grsec_lock entry once you have finished modifying
56369 + the sysctl entries.
56370 +
56371 +endmenu
56372 +menu "Logging Options"
56373 +depends on GRKERNSEC
56374 +
56375 +config GRKERNSEC_FLOODTIME
56376 + int "Seconds in between log messages (minimum)"
56377 + default 10
56378 + help
56379 + This option allows you to enforce the number of seconds between
56380 + grsecurity log messages. The default should be suitable for most
56381 +	  people; however, if you choose to change it, choose a value small enough
56382 + to allow informative logs to be produced, but large enough to
56383 + prevent flooding.
56384 +
56385 +config GRKERNSEC_FLOODBURST
56386 + int "Number of messages in a burst (maximum)"
56387 + default 6
56388 + help
56389 + This option allows you to choose the maximum number of messages allowed
56390 + within the flood time interval you chose in a separate option. The
56391 +	  default should be suitable for most people; however, if you find that
56392 + many of your logs are being interpreted as flooding, you may want to
56393 + raise this value.
56394 +
56395 +endmenu
56396 +
56397 +endmenu
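The GRKERNSEC_SYSCTL help above describes the intended runtime model: the tunables under /proc/sys/kernel/grsecurity stay writable until grsec_lock is set to a non-zero value, after which they are frozen. The sketch below illustrates that latch as a wrapper around proc_dointvec; it is a simplified stand-in, not the handler this patch installs.

/*
 * Illustrative sketch of the "grsec_lock" latch described in the
 * GRKERNSEC_SYSCTL help: once the lock value is non-zero, further writes
 * through the handler are refused.  Names are placeholders.
 */
#include <linux/sysctl.h>
#include <linux/errno.h>

static int demo_lock;   /* plays the role of grsec_lock */

static int demo_dointvec_locked(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write && demo_lock)
		return -EPERM;          /* latched: tunables are frozen */

	return proc_dointvec(table, write, buffer, lenp, ppos);
}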
56398 diff -urNp linux-3.0.7/grsecurity/Makefile linux-3.0.7/grsecurity/Makefile
56399 --- linux-3.0.7/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56400 +++ linux-3.0.7/grsecurity/Makefile 2011-10-17 06:45:43.000000000 -0400
56401 @@ -0,0 +1,36 @@
56402 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56403 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56404 +# into an RBAC system
56405 +#
56406 +# All code in this directory and various hooks inserted throughout the kernel
56407 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56408 +# under the GPL v2 or higher
56409 +
56410 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56411 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56412 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56413 +
56414 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56415 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56416 + gracl_learn.o grsec_log.o
56417 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56418 +
56419 +ifdef CONFIG_NET
56420 +obj-y += grsec_sock.o
56421 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56422 +endif
56423 +
56424 +ifndef CONFIG_GRKERNSEC
56425 +obj-y += grsec_disabled.o
56426 +endif
56427 +
56428 +ifdef CONFIG_GRKERNSEC_HIDESYM
56429 +extra-y := grsec_hidesym.o
56430 +$(obj)/grsec_hidesym.o:
56431 + @-chmod -f 500 /boot
56432 + @-chmod -f 500 /lib/modules
56433 + @-chmod -f 500 /lib64/modules
56434 + @-chmod -f 500 /lib32/modules
56435 + @-chmod -f 700 .
56436 + @echo ' grsec: protected kernel image paths'
56437 +endif
56438 diff -urNp linux-3.0.7/include/acpi/acpi_bus.h linux-3.0.7/include/acpi/acpi_bus.h
56439 --- linux-3.0.7/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
56440 +++ linux-3.0.7/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
56441 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56442 acpi_op_bind bind;
56443 acpi_op_unbind unbind;
56444 acpi_op_notify notify;
56445 -};
56446 +} __no_const;
56447
56448 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56449
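The acpi_bus.h hunk above tags struct acpi_device_ops with __no_const. Under the PaX/grsecurity constification approach, structures that hold only function pointers are made read-only so their pointers cannot be redirected at runtime, and __no_const opts out the ones the kernel legitimately writes to after initialization. The stand-alone sketch below illustrates the idea only; it reduces __no_const to a no-op macro so it compiles outside the patch's compiler-plugin machinery, which is an assumption on my part.

/*
 * Stand-alone sketch of the constification idea behind __no_const.  The
 * empty macro is a placeholder so the example builds on its own; it is not
 * how the attribute is defined in the patch.
 */
#include <stdio.h>

#define __no_const

/* Ops whose function pointers never change after build time can be const
 * and live in read-only data. */
struct demo_fixed_ops {
	int (*probe)(void);
};

/* Ops that get filled in at runtime are opted out, the same way
 * acpi_device_ops is in the hunk above. */
struct demo_runtime_ops {
	int (*probe)(void);
} __no_const;

static int noop_probe(void) { return 0; }

static const struct demo_fixed_ops fixed_ops = { .probe = noop_probe };
static struct demo_runtime_ops runtime_ops;

int main(void)
{
	runtime_ops.probe = noop_probe;   /* legal: this struct stays writable */
	printf("%d\n", fixed_ops.probe() + runtime_ops.probe());
	return 0;
}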
56450 diff -urNp linux-3.0.7/include/asm-generic/atomic-long.h linux-3.0.7/include/asm-generic/atomic-long.h
56451 --- linux-3.0.7/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
56452 +++ linux-3.0.7/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
56453 @@ -22,6 +22,12 @@
56454
56455 typedef atomic64_t atomic_long_t;
56456
56457 +#ifdef CONFIG_PAX_REFCOUNT
56458 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56459 +#else
56460 +typedef atomic64_t atomic_long_unchecked_t;
56461 +#endif
56462 +
56463 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56464
56465 static inline long atomic_long_read(atomic_long_t *l)
56466 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56467 return (long)atomic64_read(v);
56468 }
56469
56470 +#ifdef CONFIG_PAX_REFCOUNT
56471 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56472 +{
56473 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56474 +
56475 + return (long)atomic64_read_unchecked(v);
56476 +}
56477 +#endif
56478 +
56479 static inline void atomic_long_set(atomic_long_t *l, long i)
56480 {
56481 atomic64_t *v = (atomic64_t *)l;
56482 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56483 atomic64_set(v, i);
56484 }
56485
56486 +#ifdef CONFIG_PAX_REFCOUNT
56487 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56488 +{
56489 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56490 +
56491 + atomic64_set_unchecked(v, i);
56492 +}
56493 +#endif
56494 +
56495 static inline void atomic_long_inc(atomic_long_t *l)
56496 {
56497 atomic64_t *v = (atomic64_t *)l;
56498 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56499 atomic64_inc(v);
56500 }
56501
56502 +#ifdef CONFIG_PAX_REFCOUNT
56503 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56504 +{
56505 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56506 +
56507 + atomic64_inc_unchecked(v);
56508 +}
56509 +#endif
56510 +
56511 static inline void atomic_long_dec(atomic_long_t *l)
56512 {
56513 atomic64_t *v = (atomic64_t *)l;
56514 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56515 atomic64_dec(v);
56516 }
56517
56518 +#ifdef CONFIG_PAX_REFCOUNT
56519 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56520 +{
56521 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56522 +
56523 + atomic64_dec_unchecked(v);
56524 +}
56525 +#endif
56526 +
56527 static inline void atomic_long_add(long i, atomic_long_t *l)
56528 {
56529 atomic64_t *v = (atomic64_t *)l;
56530 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56531 atomic64_add(i, v);
56532 }
56533
56534 +#ifdef CONFIG_PAX_REFCOUNT
56535 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56536 +{
56537 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56538 +
56539 + atomic64_add_unchecked(i, v);
56540 +}
56541 +#endif
56542 +
56543 static inline void atomic_long_sub(long i, atomic_long_t *l)
56544 {
56545 atomic64_t *v = (atomic64_t *)l;
56546 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
56547 atomic64_sub(i, v);
56548 }
56549
56550 +#ifdef CONFIG_PAX_REFCOUNT
56551 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56552 +{
56553 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56554 +
56555 + atomic64_sub_unchecked(i, v);
56556 +}
56557 +#endif
56558 +
56559 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56560 {
56561 atomic64_t *v = (atomic64_t *)l;
56562 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
56563 return (long)atomic64_inc_return(v);
56564 }
56565
56566 +#ifdef CONFIG_PAX_REFCOUNT
56567 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56568 +{
56569 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56570 +
56571 + return (long)atomic64_inc_return_unchecked(v);
56572 +}
56573 +#endif
56574 +
56575 static inline long atomic_long_dec_return(atomic_long_t *l)
56576 {
56577 atomic64_t *v = (atomic64_t *)l;
56578 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
56579
56580 typedef atomic_t atomic_long_t;
56581
56582 +#ifdef CONFIG_PAX_REFCOUNT
56583 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56584 +#else
56585 +typedef atomic_t atomic_long_unchecked_t;
56586 +#endif
56587 +
56588 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56589 static inline long atomic_long_read(atomic_long_t *l)
56590 {
56591 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
56592 return (long)atomic_read(v);
56593 }
56594
56595 +#ifdef CONFIG_PAX_REFCOUNT
56596 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56597 +{
56598 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56599 +
56600 + return (long)atomic_read_unchecked(v);
56601 +}
56602 +#endif
56603 +
56604 static inline void atomic_long_set(atomic_long_t *l, long i)
56605 {
56606 atomic_t *v = (atomic_t *)l;
56607 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
56608 atomic_set(v, i);
56609 }
56610
56611 +#ifdef CONFIG_PAX_REFCOUNT
56612 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56613 +{
56614 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56615 +
56616 + atomic_set_unchecked(v, i);
56617 +}
56618 +#endif
56619 +
56620 static inline void atomic_long_inc(atomic_long_t *l)
56621 {
56622 atomic_t *v = (atomic_t *)l;
56623 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
56624 atomic_inc(v);
56625 }
56626
56627 +#ifdef CONFIG_PAX_REFCOUNT
56628 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56629 +{
56630 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56631 +
56632 + atomic_inc_unchecked(v);
56633 +}
56634 +#endif
56635 +
56636 static inline void atomic_long_dec(atomic_long_t *l)
56637 {
56638 atomic_t *v = (atomic_t *)l;
56639 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
56640 atomic_dec(v);
56641 }
56642
56643 +#ifdef CONFIG_PAX_REFCOUNT
56644 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56645 +{
56646 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56647 +
56648 + atomic_dec_unchecked(v);
56649 +}
56650 +#endif
56651 +
56652 static inline void atomic_long_add(long i, atomic_long_t *l)
56653 {
56654 atomic_t *v = (atomic_t *)l;
56655 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
56656 atomic_add(i, v);
56657 }
56658
56659 +#ifdef CONFIG_PAX_REFCOUNT
56660 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56661 +{
56662 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56663 +
56664 + atomic_add_unchecked(i, v);
56665 +}
56666 +#endif
56667 +
56668 static inline void atomic_long_sub(long i, atomic_long_t *l)
56669 {
56670 atomic_t *v = (atomic_t *)l;
56671 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
56672 atomic_sub(i, v);
56673 }
56674
56675 +#ifdef CONFIG_PAX_REFCOUNT
56676 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56677 +{
56678 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56679 +
56680 + atomic_sub_unchecked(i, v);
56681 +}
56682 +#endif
56683 +
56684 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56685 {
56686 atomic_t *v = (atomic_t *)l;
56687 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
56688 return (long)atomic_inc_return(v);
56689 }
56690
56691 +#ifdef CONFIG_PAX_REFCOUNT
56692 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56693 +{
56694 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56695 +
56696 + return (long)atomic_inc_return_unchecked(v);
56697 +}
56698 +#endif
56699 +
56700 static inline long atomic_long_dec_return(atomic_long_t *l)
56701 {
56702 atomic_t *v = (atomic_t *)l;
56703 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
56704
56705 #endif /* BITS_PER_LONG == 64 */
56706
56707 +#ifdef CONFIG_PAX_REFCOUNT
56708 +static inline void pax_refcount_needs_these_functions(void)
56709 +{
56710 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56711 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56712 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56713 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56714 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56715 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56716 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56717 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56718 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56719 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56720 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56721 +
56722 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56723 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56724 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56725 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
56726 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56727 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56728 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56729 +}
56730 +#else
56731 +#define atomic_read_unchecked(v) atomic_read(v)
56732 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56733 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56734 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56735 +#define atomic_inc_unchecked(v) atomic_inc(v)
56736 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56737 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56738 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56739 +#define atomic_dec_unchecked(v) atomic_dec(v)
56740 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56741 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56742 +
56743 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56744 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56745 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56746 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
56747 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56748 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56749 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56750 +#endif
56751 +
56752 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56753 diff -urNp linux-3.0.7/include/asm-generic/cache.h linux-3.0.7/include/asm-generic/cache.h
56754 --- linux-3.0.7/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
56755 +++ linux-3.0.7/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
56756 @@ -6,7 +6,7 @@
56757 * cache lines need to provide their own cache.h.
56758 */
56759
56760 -#define L1_CACHE_SHIFT 5
56761 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56762 +#define L1_CACHE_SHIFT 5UL
56763 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56764
56765 #endif /* __ASM_GENERIC_CACHE_H */
56766 diff -urNp linux-3.0.7/include/asm-generic/int-l64.h linux-3.0.7/include/asm-generic/int-l64.h
56767 --- linux-3.0.7/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
56768 +++ linux-3.0.7/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
56769 @@ -46,6 +46,8 @@ typedef unsigned int u32;
56770 typedef signed long s64;
56771 typedef unsigned long u64;
56772
56773 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56774 +
56775 #define S8_C(x) x
56776 #define U8_C(x) x ## U
56777 #define S16_C(x) x
56778 diff -urNp linux-3.0.7/include/asm-generic/int-ll64.h linux-3.0.7/include/asm-generic/int-ll64.h
56779 --- linux-3.0.7/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
56780 +++ linux-3.0.7/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
56781 @@ -51,6 +51,8 @@ typedef unsigned int u32;
56782 typedef signed long long s64;
56783 typedef unsigned long long u64;
56784
56785 +typedef unsigned long long intoverflow_t;
56786 +
56787 #define S8_C(x) x
56788 #define U8_C(x) x ## U
56789 #define S16_C(x) x
56790 diff -urNp linux-3.0.7/include/asm-generic/kmap_types.h linux-3.0.7/include/asm-generic/kmap_types.h
56791 --- linux-3.0.7/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
56792 +++ linux-3.0.7/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
56793 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
56794 KMAP_D(17) KM_NMI,
56795 KMAP_D(18) KM_NMI_PTE,
56796 KMAP_D(19) KM_KDB,
56797 +KMAP_D(20) KM_CLEARPAGE,
56798 /*
56799 * Remember to update debug_kmap_atomic() when adding new kmap types!
56800 */
56801 -KMAP_D(20) KM_TYPE_NR
56802 +KMAP_D(21) KM_TYPE_NR
56803 };
56804
56805 #undef KMAP_D
56806 diff -urNp linux-3.0.7/include/asm-generic/pgtable.h linux-3.0.7/include/asm-generic/pgtable.h
56807 --- linux-3.0.7/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
56808 +++ linux-3.0.7/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
56809 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
56810 #endif /* __HAVE_ARCH_PMD_WRITE */
56811 #endif
56812
56813 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56814 +static inline unsigned long pax_open_kernel(void) { return 0; }
56815 +#endif
56816 +
56817 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56818 +static inline unsigned long pax_close_kernel(void) { return 0; }
56819 +#endif
56820 +
56821 #endif /* !__ASSEMBLY__ */
56822
56823 #endif /* _ASM_GENERIC_PGTABLE_H */
56824 diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopmd.h linux-3.0.7/include/asm-generic/pgtable-nopmd.h
56825 --- linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
56826 +++ linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
56827 @@ -1,14 +1,19 @@
56828 #ifndef _PGTABLE_NOPMD_H
56829 #define _PGTABLE_NOPMD_H
56830
56831 -#ifndef __ASSEMBLY__
56832 -
56833 #include <asm-generic/pgtable-nopud.h>
56834
56835 -struct mm_struct;
56836 -
56837 #define __PAGETABLE_PMD_FOLDED
56838
56839 +#define PMD_SHIFT PUD_SHIFT
56840 +#define PTRS_PER_PMD 1
56841 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56842 +#define PMD_MASK (~(PMD_SIZE-1))
56843 +
56844 +#ifndef __ASSEMBLY__
56845 +
56846 +struct mm_struct;
56847 +
56848 /*
56849 * Having the pmd type consist of a pud gets the size right, and allows
56850 * us to conceptually access the pud entry that this pmd is folded into
56851 @@ -16,11 +21,6 @@ struct mm_struct;
56852 */
56853 typedef struct { pud_t pud; } pmd_t;
56854
56855 -#define PMD_SHIFT PUD_SHIFT
56856 -#define PTRS_PER_PMD 1
56857 -#define PMD_SIZE (1UL << PMD_SHIFT)
56858 -#define PMD_MASK (~(PMD_SIZE-1))
56859 -
56860 /*
56861 * The "pud_xxx()" functions here are trivial for a folded two-level
56862 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56863 diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopud.h linux-3.0.7/include/asm-generic/pgtable-nopud.h
56864 --- linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
56865 +++ linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
56866 @@ -1,10 +1,15 @@
56867 #ifndef _PGTABLE_NOPUD_H
56868 #define _PGTABLE_NOPUD_H
56869
56870 -#ifndef __ASSEMBLY__
56871 -
56872 #define __PAGETABLE_PUD_FOLDED
56873
56874 +#define PUD_SHIFT PGDIR_SHIFT
56875 +#define PTRS_PER_PUD 1
56876 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56877 +#define PUD_MASK (~(PUD_SIZE-1))
56878 +
56879 +#ifndef __ASSEMBLY__
56880 +
56881 /*
56882 * Having the pud type consist of a pgd gets the size right, and allows
56883 * us to conceptually access the pgd entry that this pud is folded into
56884 @@ -12,11 +17,6 @@
56885 */
56886 typedef struct { pgd_t pgd; } pud_t;
56887
56888 -#define PUD_SHIFT PGDIR_SHIFT
56889 -#define PTRS_PER_PUD 1
56890 -#define PUD_SIZE (1UL << PUD_SHIFT)
56891 -#define PUD_MASK (~(PUD_SIZE-1))
56892 -
56893 /*
56894 * The "pgd_xxx()" functions here are trivial for a folded two-level
56895 * setup: the pud is never bad, and a pud always exists (as it's folded
56896 diff -urNp linux-3.0.7/include/asm-generic/vmlinux.lds.h linux-3.0.7/include/asm-generic/vmlinux.lds.h
56897 --- linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
56898 +++ linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
56899 @@ -217,6 +217,7 @@
56900 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56901 VMLINUX_SYMBOL(__start_rodata) = .; \
56902 *(.rodata) *(.rodata.*) \
56903 + *(.data..read_only) \
56904 *(__vermagic) /* Kernel version magic */ \
56905 . = ALIGN(8); \
56906 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
56907 @@ -723,17 +724,18 @@
56908 * section in the linker script will go there too. @phdr should have
56909 * a leading colon.
56910 *
56911 - * Note that this macros defines __per_cpu_load as an absolute symbol.
56912 + * Note that this macros defines per_cpu_load as an absolute symbol.
56913 * If there is no need to put the percpu section at a predetermined
56914 * address, use PERCPU_SECTION.
56915 */
56916 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
56917 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
56918 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56919 + per_cpu_load = .; \
56920 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56921 - LOAD_OFFSET) { \
56922 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56923 PERCPU_INPUT(cacheline) \
56924 } phdr \
56925 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
56926 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
56927
56928 /**
56929 * PERCPU_SECTION - define output section for percpu area, simple version
56930 diff -urNp linux-3.0.7/include/drm/drm_crtc_helper.h linux-3.0.7/include/drm/drm_crtc_helper.h
56931 --- linux-3.0.7/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
56932 +++ linux-3.0.7/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
56933 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
56934
56935 /* disable crtc when not in use - more explicit than dpms off */
56936 void (*disable)(struct drm_crtc *crtc);
56937 -};
56938 +} __no_const;
56939
56940 struct drm_encoder_helper_funcs {
56941 void (*dpms)(struct drm_encoder *encoder, int mode);
56942 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
56943 struct drm_connector *connector);
56944 /* disable encoder when not in use - more explicit than dpms off */
56945 void (*disable)(struct drm_encoder *encoder);
56946 -};
56947 +} __no_const;
56948
56949 struct drm_connector_helper_funcs {
56950 int (*get_modes)(struct drm_connector *connector);
56951 diff -urNp linux-3.0.7/include/drm/drmP.h linux-3.0.7/include/drm/drmP.h
56952 --- linux-3.0.7/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
56953 +++ linux-3.0.7/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
56954 @@ -73,6 +73,7 @@
56955 #include <linux/workqueue.h>
56956 #include <linux/poll.h>
56957 #include <asm/pgalloc.h>
56958 +#include <asm/local.h>
56959 #include "drm.h"
56960
56961 #include <linux/idr.h>
56962 @@ -1033,7 +1034,7 @@ struct drm_device {
56963
56964 /** \name Usage Counters */
56965 /*@{ */
56966 - int open_count; /**< Outstanding files open */
56967 + local_t open_count; /**< Outstanding files open */
56968 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
56969 atomic_t vma_count; /**< Outstanding vma areas open */
56970 int buf_use; /**< Buffers in use -- cannot alloc */
56971 @@ -1044,7 +1045,7 @@ struct drm_device {
56972 /*@{ */
56973 unsigned long counters;
56974 enum drm_stat_type types[15];
56975 - atomic_t counts[15];
56976 + atomic_unchecked_t counts[15];
56977 /*@} */
56978
56979 struct list_head filelist;
56980 diff -urNp linux-3.0.7/include/drm/ttm/ttm_memory.h linux-3.0.7/include/drm/ttm/ttm_memory.h
56981 --- linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
56982 +++ linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
56983 @@ -47,7 +47,7 @@
56984
56985 struct ttm_mem_shrink {
56986 int (*do_shrink) (struct ttm_mem_shrink *);
56987 -};
56988 +} __no_const;
56989
56990 /**
56991 * struct ttm_mem_global - Global memory accounting structure.
56992 diff -urNp linux-3.0.7/include/linux/a.out.h linux-3.0.7/include/linux/a.out.h
56993 --- linux-3.0.7/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
56994 +++ linux-3.0.7/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
56995 @@ -39,6 +39,14 @@ enum machine_type {
56996 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
56997 };
56998
56999 +/* Constants for the N_FLAGS field */
57000 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57001 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57002 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57003 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57004 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57005 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57006 +
57007 #if !defined (N_MAGIC)
57008 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57009 #endif
57010 diff -urNp linux-3.0.7/include/linux/atmdev.h linux-3.0.7/include/linux/atmdev.h
57011 --- linux-3.0.7/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
57012 +++ linux-3.0.7/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
57013 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57014 #endif
57015
57016 struct k_atm_aal_stats {
57017 -#define __HANDLE_ITEM(i) atomic_t i
57018 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57019 __AAL_STAT_ITEMS
57020 #undef __HANDLE_ITEM
57021 };
57022 diff -urNp linux-3.0.7/include/linux/binfmts.h linux-3.0.7/include/linux/binfmts.h
57023 --- linux-3.0.7/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
57024 +++ linux-3.0.7/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
57025 @@ -88,6 +88,7 @@ struct linux_binfmt {
57026 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57027 int (*load_shlib)(struct file *);
57028 int (*core_dump)(struct coredump_params *cprm);
57029 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57030 unsigned long min_coredump; /* minimal dump size */
57031 };
57032
57033 diff -urNp linux-3.0.7/include/linux/blkdev.h linux-3.0.7/include/linux/blkdev.h
57034 --- linux-3.0.7/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
57035 +++ linux-3.0.7/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
57036 @@ -1308,7 +1308,7 @@ struct block_device_operations {
57037 /* this callback is with swap_lock and sometimes page table lock held */
57038 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57039 struct module *owner;
57040 -};
57041 +} __do_const;
57042
57043 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57044 unsigned long);
57045 diff -urNp linux-3.0.7/include/linux/blktrace_api.h linux-3.0.7/include/linux/blktrace_api.h
57046 --- linux-3.0.7/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
57047 +++ linux-3.0.7/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
57048 @@ -161,7 +161,7 @@ struct blk_trace {
57049 struct dentry *dir;
57050 struct dentry *dropped_file;
57051 struct dentry *msg_file;
57052 - atomic_t dropped;
57053 + atomic_unchecked_t dropped;
57054 };
57055
57056 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57057 diff -urNp linux-3.0.7/include/linux/byteorder/little_endian.h linux-3.0.7/include/linux/byteorder/little_endian.h
57058 --- linux-3.0.7/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
57059 +++ linux-3.0.7/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
57060 @@ -42,51 +42,51 @@
57061
57062 static inline __le64 __cpu_to_le64p(const __u64 *p)
57063 {
57064 - return (__force __le64)*p;
57065 + return (__force const __le64)*p;
57066 }
57067 static inline __u64 __le64_to_cpup(const __le64 *p)
57068 {
57069 - return (__force __u64)*p;
57070 + return (__force const __u64)*p;
57071 }
57072 static inline __le32 __cpu_to_le32p(const __u32 *p)
57073 {
57074 - return (__force __le32)*p;
57075 + return (__force const __le32)*p;
57076 }
57077 static inline __u32 __le32_to_cpup(const __le32 *p)
57078 {
57079 - return (__force __u32)*p;
57080 + return (__force const __u32)*p;
57081 }
57082 static inline __le16 __cpu_to_le16p(const __u16 *p)
57083 {
57084 - return (__force __le16)*p;
57085 + return (__force const __le16)*p;
57086 }
57087 static inline __u16 __le16_to_cpup(const __le16 *p)
57088 {
57089 - return (__force __u16)*p;
57090 + return (__force const __u16)*p;
57091 }
57092 static inline __be64 __cpu_to_be64p(const __u64 *p)
57093 {
57094 - return (__force __be64)__swab64p(p);
57095 + return (__force const __be64)__swab64p(p);
57096 }
57097 static inline __u64 __be64_to_cpup(const __be64 *p)
57098 {
57099 - return __swab64p((__u64 *)p);
57100 + return __swab64p((const __u64 *)p);
57101 }
57102 static inline __be32 __cpu_to_be32p(const __u32 *p)
57103 {
57104 - return (__force __be32)__swab32p(p);
57105 + return (__force const __be32)__swab32p(p);
57106 }
57107 static inline __u32 __be32_to_cpup(const __be32 *p)
57108 {
57109 - return __swab32p((__u32 *)p);
57110 + return __swab32p((const __u32 *)p);
57111 }
57112 static inline __be16 __cpu_to_be16p(const __u16 *p)
57113 {
57114 - return (__force __be16)__swab16p(p);
57115 + return (__force const __be16)__swab16p(p);
57116 }
57117 static inline __u16 __be16_to_cpup(const __be16 *p)
57118 {
57119 - return __swab16p((__u16 *)p);
57120 + return __swab16p((const __u16 *)p);
57121 }
57122 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57123 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57124 diff -urNp linux-3.0.7/include/linux/cache.h linux-3.0.7/include/linux/cache.h
57125 --- linux-3.0.7/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
57126 +++ linux-3.0.7/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
57127 @@ -16,6 +16,10 @@
57128 #define __read_mostly
57129 #endif
57130
57131 +#ifndef __read_only
57132 +#define __read_only __read_mostly
57133 +#endif
57134 +
57135 #ifndef ____cacheline_aligned
57136 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57137 #endif
57138 diff -urNp linux-3.0.7/include/linux/capability.h linux-3.0.7/include/linux/capability.h
57139 --- linux-3.0.7/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
57140 +++ linux-3.0.7/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
57141 @@ -547,6 +547,9 @@ extern bool capable(int cap);
57142 extern bool ns_capable(struct user_namespace *ns, int cap);
57143 extern bool task_ns_capable(struct task_struct *t, int cap);
57144 extern bool nsown_capable(int cap);
57145 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57146 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57147 +extern bool capable_nolog(int cap);
57148
57149 /* audit system wants to get cap info from files as well */
57150 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57151 diff -urNp linux-3.0.7/include/linux/cleancache.h linux-3.0.7/include/linux/cleancache.h
57152 --- linux-3.0.7/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
57153 +++ linux-3.0.7/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
57154 @@ -31,7 +31,7 @@ struct cleancache_ops {
57155 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57156 void (*flush_inode)(int, struct cleancache_filekey);
57157 void (*flush_fs)(int);
57158 -};
57159 +} __no_const;
57160
57161 extern struct cleancache_ops
57162 cleancache_register_ops(struct cleancache_ops *ops);
57163 diff -urNp linux-3.0.7/include/linux/compiler-gcc4.h linux-3.0.7/include/linux/compiler-gcc4.h
57164 --- linux-3.0.7/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
57165 +++ linux-3.0.7/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
57166 @@ -31,6 +31,12 @@
57167
57168
57169 #if __GNUC_MINOR__ >= 5
57170 +
57171 +#ifdef CONSTIFY_PLUGIN
57172 +#define __no_const __attribute__((no_const))
57173 +#define __do_const __attribute__((do_const))
57174 +#endif
57175 +
57176 /*
57177 * Mark a position in code as unreachable. This can be used to
57178 * suppress control flow warnings after asm blocks that transfer
57179 @@ -46,6 +52,11 @@
57180 #define __noclone __attribute__((__noclone__))
57181
57182 #endif
57183 +
57184 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57185 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57186 +#define __bos0(ptr) __bos((ptr), 0)
57187 +#define __bos1(ptr) __bos((ptr), 1)
57188 #endif
57189
57190 #if __GNUC_MINOR__ > 0
57191 diff -urNp linux-3.0.7/include/linux/compiler.h linux-3.0.7/include/linux/compiler.h
57192 --- linux-3.0.7/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
57193 +++ linux-3.0.7/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
57194 @@ -5,31 +5,62 @@
57195
57196 #ifdef __CHECKER__
57197 # define __user __attribute__((noderef, address_space(1)))
57198 +# define __force_user __force __user
57199 # define __kernel __attribute__((address_space(0)))
57200 +# define __force_kernel __force __kernel
57201 # define __safe __attribute__((safe))
57202 # define __force __attribute__((force))
57203 # define __nocast __attribute__((nocast))
57204 # define __iomem __attribute__((noderef, address_space(2)))
57205 +# define __force_iomem __force __iomem
57206 # define __acquires(x) __attribute__((context(x,0,1)))
57207 # define __releases(x) __attribute__((context(x,1,0)))
57208 # define __acquire(x) __context__(x,1)
57209 # define __release(x) __context__(x,-1)
57210 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57211 # define __percpu __attribute__((noderef, address_space(3)))
57212 +# define __force_percpu __force __percpu
57213 #ifdef CONFIG_SPARSE_RCU_POINTER
57214 # define __rcu __attribute__((noderef, address_space(4)))
57215 +# define __force_rcu __force __rcu
57216 #else
57217 # define __rcu
57218 +# define __force_rcu
57219 #endif
57220 extern void __chk_user_ptr(const volatile void __user *);
57221 extern void __chk_io_ptr(const volatile void __iomem *);
57222 +#elif defined(CHECKER_PLUGIN)
57223 +//# define __user
57224 +//# define __force_user
57225 +//# define __kernel
57226 +//# define __force_kernel
57227 +# define __safe
57228 +# define __force
57229 +# define __nocast
57230 +# define __iomem
57231 +# define __force_iomem
57232 +# define __chk_user_ptr(x) (void)0
57233 +# define __chk_io_ptr(x) (void)0
57234 +# define __builtin_warning(x, y...) (1)
57235 +# define __acquires(x)
57236 +# define __releases(x)
57237 +# define __acquire(x) (void)0
57238 +# define __release(x) (void)0
57239 +# define __cond_lock(x,c) (c)
57240 +# define __percpu
57241 +# define __force_percpu
57242 +# define __rcu
57243 +# define __force_rcu
57244 #else
57245 # define __user
57246 +# define __force_user
57247 # define __kernel
57248 +# define __force_kernel
57249 # define __safe
57250 # define __force
57251 # define __nocast
57252 # define __iomem
57253 +# define __force_iomem
57254 # define __chk_user_ptr(x) (void)0
57255 # define __chk_io_ptr(x) (void)0
57256 # define __builtin_warning(x, y...) (1)
57257 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57258 # define __release(x) (void)0
57259 # define __cond_lock(x,c) (c)
57260 # define __percpu
57261 +# define __force_percpu
57262 # define __rcu
57263 +# define __force_rcu
57264 #endif
57265
57266 #ifdef __KERNEL__
57267 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57268 # define __attribute_const__ /* unimplemented */
57269 #endif
57270
57271 +#ifndef __no_const
57272 +# define __no_const
57273 +#endif
57274 +
57275 +#ifndef __do_const
57276 +# define __do_const
57277 +#endif
57278 +
57279 /*
57280 * Tell gcc if a function is cold. The compiler will assume any path
57281 * directly leading to the call is unlikely.
57282 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57283 #define __cold
57284 #endif
57285
57286 +#ifndef __alloc_size
57287 +#define __alloc_size(...)
57288 +#endif
57289 +
57290 +#ifndef __bos
57291 +#define __bos(ptr, arg)
57292 +#endif
57293 +
57294 +#ifndef __bos0
57295 +#define __bos0(ptr)
57296 +#endif
57297 +
57298 +#ifndef __bos1
57299 +#define __bos1(ptr)
57300 +#endif
57301 +
57302 /* Simple shorthand for a section definition */
57303 #ifndef __section
57304 # define __section(S) __attribute__ ((__section__(#S)))
57305 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57306 * use is to mediate communication between process-level code and irq/NMI
57307 * handlers, all running on the same CPU.
57308 */
57309 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57310 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57311 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57312
57313 #endif /* __LINUX_COMPILER_H */
57314 diff -urNp linux-3.0.7/include/linux/cpuset.h linux-3.0.7/include/linux/cpuset.h
57315 --- linux-3.0.7/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
57316 +++ linux-3.0.7/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
57317 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57318 * nodemask.
57319 */
57320 smp_mb();
57321 - --ACCESS_ONCE(current->mems_allowed_change_disable);
57322 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57323 }
57324
57325 static inline void set_mems_allowed(nodemask_t nodemask)
57326 diff -urNp linux-3.0.7/include/linux/crypto.h linux-3.0.7/include/linux/crypto.h
57327 --- linux-3.0.7/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
57328 +++ linux-3.0.7/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
57329 @@ -361,7 +361,7 @@ struct cipher_tfm {
57330 const u8 *key, unsigned int keylen);
57331 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57332 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57333 -};
57334 +} __no_const;
57335
57336 struct hash_tfm {
57337 int (*init)(struct hash_desc *desc);
57338 @@ -382,13 +382,13 @@ struct compress_tfm {
57339 int (*cot_decompress)(struct crypto_tfm *tfm,
57340 const u8 *src, unsigned int slen,
57341 u8 *dst, unsigned int *dlen);
57342 -};
57343 +} __no_const;
57344
57345 struct rng_tfm {
57346 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57347 unsigned int dlen);
57348 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57349 -};
57350 +} __no_const;
57351
57352 #define crt_ablkcipher crt_u.ablkcipher
57353 #define crt_aead crt_u.aead
57354 diff -urNp linux-3.0.7/include/linux/decompress/mm.h linux-3.0.7/include/linux/decompress/mm.h
57355 --- linux-3.0.7/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
57356 +++ linux-3.0.7/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
57357 @@ -77,7 +77,7 @@ static void free(void *where)
57358 * warnings when not needed (indeed large_malloc / large_free are not
57359 * needed by inflate */
57360
57361 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57362 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57363 #define free(a) kfree(a)
57364
57365 #define large_malloc(a) vmalloc(a)
57366 diff -urNp linux-3.0.7/include/linux/dma-mapping.h linux-3.0.7/include/linux/dma-mapping.h
57367 --- linux-3.0.7/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
57368 +++ linux-3.0.7/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
57369 @@ -50,7 +50,7 @@ struct dma_map_ops {
57370 int (*dma_supported)(struct device *dev, u64 mask);
57371 int (*set_dma_mask)(struct device *dev, u64 mask);
57372 int is_phys;
57373 -};
57374 +} __do_const;
57375
57376 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57377
57378 diff -urNp linux-3.0.7/include/linux/efi.h linux-3.0.7/include/linux/efi.h
57379 --- linux-3.0.7/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
57380 +++ linux-3.0.7/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
57381 @@ -410,7 +410,7 @@ struct efivar_operations {
57382 efi_get_variable_t *get_variable;
57383 efi_get_next_variable_t *get_next_variable;
57384 efi_set_variable_t *set_variable;
57385 -};
57386 +} __no_const;
57387
57388 struct efivars {
57389 /*
57390 diff -urNp linux-3.0.7/include/linux/elf.h linux-3.0.7/include/linux/elf.h
57391 --- linux-3.0.7/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
57392 +++ linux-3.0.7/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
57393 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57394 #define PT_GNU_EH_FRAME 0x6474e550
57395
57396 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57397 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57398 +
57399 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57400 +
57401 +/* Constants for the e_flags field */
57402 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57403 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57404 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57405 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57406 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57407 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57408
57409 /*
57410 * Extended Numbering
57411 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57412 #define DT_DEBUG 21
57413 #define DT_TEXTREL 22
57414 #define DT_JMPREL 23
57415 +#define DT_FLAGS 30
57416 + #define DF_TEXTREL 0x00000004
57417 #define DT_ENCODING 32
57418 #define OLD_DT_LOOS 0x60000000
57419 #define DT_LOOS 0x6000000d
57420 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57421 #define PF_W 0x2
57422 #define PF_X 0x1
57423
57424 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57425 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57426 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57427 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57428 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57429 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57430 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57431 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57432 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57433 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57434 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57435 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57436 +
57437 typedef struct elf32_phdr{
57438 Elf32_Word p_type;
57439 Elf32_Off p_offset;
57440 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57441 #define EI_OSABI 7
57442 #define EI_PAD 8
57443
57444 +#define EI_PAX 14
57445 +
57446 #define ELFMAG0 0x7f /* EI_MAG */
57447 #define ELFMAG1 'E'
57448 #define ELFMAG2 'L'
57449 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57450 #define elf_note elf32_note
57451 #define elf_addr_t Elf32_Off
57452 #define Elf_Half Elf32_Half
57453 +#define elf_dyn Elf32_Dyn
57454
57455 #else
57456
57457 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57458 #define elf_note elf64_note
57459 #define elf_addr_t Elf64_Off
57460 #define Elf_Half Elf64_Half
57461 +#define elf_dyn Elf64_Dyn
57462
57463 #endif
57464
57465 diff -urNp linux-3.0.7/include/linux/firewire.h linux-3.0.7/include/linux/firewire.h
57466 --- linux-3.0.7/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
57467 +++ linux-3.0.7/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
57468 @@ -428,7 +428,7 @@ struct fw_iso_context {
57469 union {
57470 fw_iso_callback_t sc;
57471 fw_iso_mc_callback_t mc;
57472 - } callback;
57473 + } __no_const callback;
57474 void *callback_data;
57475 };
57476
57477 diff -urNp linux-3.0.7/include/linux/fscache-cache.h linux-3.0.7/include/linux/fscache-cache.h
57478 --- linux-3.0.7/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
57479 +++ linux-3.0.7/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
57480 @@ -102,7 +102,7 @@ struct fscache_operation {
57481 fscache_operation_release_t release;
57482 };
57483
57484 -extern atomic_t fscache_op_debug_id;
57485 +extern atomic_unchecked_t fscache_op_debug_id;
57486 extern void fscache_op_work_func(struct work_struct *work);
57487
57488 extern void fscache_enqueue_operation(struct fscache_operation *);
57489 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57490 {
57491 INIT_WORK(&op->work, fscache_op_work_func);
57492 atomic_set(&op->usage, 1);
57493 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57494 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57495 op->processor = processor;
57496 op->release = release;
57497 INIT_LIST_HEAD(&op->pend_link);
57498 diff -urNp linux-3.0.7/include/linux/fs.h linux-3.0.7/include/linux/fs.h
57499 --- linux-3.0.7/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
57500 +++ linux-3.0.7/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
57501 @@ -109,6 +109,11 @@ struct inodes_stat_t {
57502 /* File was opened by fanotify and shouldn't generate fanotify events */
57503 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
57504
57505 +/* Hack for grsec so as not to require read permission simply to execute
57506 + * a binary
57507 + */
57508 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57509 +
57510 /*
57511 * The below are the various read and write types that we support. Some of
57512 * them include behavioral modifiers that send information down to the
57513 @@ -1571,7 +1576,8 @@ struct file_operations {
57514 int (*setlease)(struct file *, long, struct file_lock **);
57515 long (*fallocate)(struct file *file, int mode, loff_t offset,
57516 loff_t len);
57517 -};
57518 +} __do_const;
57519 +typedef struct file_operations __no_const file_operations_no_const;
57520
57521 #define IPERM_FLAG_RCU 0x0001
57522
57523 diff -urNp linux-3.0.7/include/linux/fsnotify.h linux-3.0.7/include/linux/fsnotify.h
57524 --- linux-3.0.7/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
57525 +++ linux-3.0.7/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
57526 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57527 */
57528 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57529 {
57530 - return kstrdup(name, GFP_KERNEL);
57531 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57532 }
57533
57534 /*
57535 diff -urNp linux-3.0.7/include/linux/fs_struct.h linux-3.0.7/include/linux/fs_struct.h
57536 --- linux-3.0.7/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
57537 +++ linux-3.0.7/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
57538 @@ -6,7 +6,7 @@
57539 #include <linux/seqlock.h>
57540
57541 struct fs_struct {
57542 - int users;
57543 + atomic_t users;
57544 spinlock_t lock;
57545 seqcount_t seq;
57546 int umask;
57547 diff -urNp linux-3.0.7/include/linux/ftrace_event.h linux-3.0.7/include/linux/ftrace_event.h
57548 --- linux-3.0.7/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
57549 +++ linux-3.0.7/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
57550 @@ -96,7 +96,7 @@ struct trace_event_functions {
57551 trace_print_func raw;
57552 trace_print_func hex;
57553 trace_print_func binary;
57554 -};
57555 +} __no_const;
57556
57557 struct trace_event {
57558 struct hlist_node node;
57559 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
57560 extern int trace_add_event_call(struct ftrace_event_call *call);
57561 extern void trace_remove_event_call(struct ftrace_event_call *call);
57562
57563 -#define is_signed_type(type) (((type)(-1)) < 0)
57564 +#define is_signed_type(type) (((type)(-1)) < (type)1)
57565
57566 int trace_set_clr_event(const char *system, const char *event, int set);
57567
57568 diff -urNp linux-3.0.7/include/linux/genhd.h linux-3.0.7/include/linux/genhd.h
57569 --- linux-3.0.7/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
57570 +++ linux-3.0.7/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
57571 @@ -184,7 +184,7 @@ struct gendisk {
57572 struct kobject *slave_dir;
57573
57574 struct timer_rand_state *random;
57575 - atomic_t sync_io; /* RAID */
57576 + atomic_unchecked_t sync_io; /* RAID */
57577 struct disk_events *ev;
57578 #ifdef CONFIG_BLK_DEV_INTEGRITY
57579 struct blk_integrity *integrity;
57580 diff -urNp linux-3.0.7/include/linux/gracl.h linux-3.0.7/include/linux/gracl.h
57581 --- linux-3.0.7/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57582 +++ linux-3.0.7/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
57583 @@ -0,0 +1,317 @@
57584 +#ifndef GR_ACL_H
57585 +#define GR_ACL_H
57586 +
57587 +#include <linux/grdefs.h>
57588 +#include <linux/resource.h>
57589 +#include <linux/capability.h>
57590 +#include <linux/dcache.h>
57591 +#include <asm/resource.h>
57592 +
57593 +/* Major status information */
57594 +
57595 +#define GR_VERSION "grsecurity 2.2.2"
57596 +#define GRSECURITY_VERSION 0x2202
57597 +
57598 +enum {
57599 + GR_SHUTDOWN = 0,
57600 + GR_ENABLE = 1,
57601 + GR_SPROLE = 2,
57602 + GR_RELOAD = 3,
57603 + GR_SEGVMOD = 4,
57604 + GR_STATUS = 5,
57605 + GR_UNSPROLE = 6,
57606 + GR_PASSSET = 7,
57607 + GR_SPROLEPAM = 8,
57608 +};
57609 +
57610 +/* Password setup definitions
57611 + * kernel/grhash.c */
57612 +enum {
57613 + GR_PW_LEN = 128,
57614 + GR_SALT_LEN = 16,
57615 + GR_SHA_LEN = 32,
57616 +};
57617 +
57618 +enum {
57619 + GR_SPROLE_LEN = 64,
57620 +};
57621 +
57622 +enum {
57623 + GR_NO_GLOB = 0,
57624 + GR_REG_GLOB,
57625 + GR_CREATE_GLOB
57626 +};
57627 +
57628 +#define GR_NLIMITS 32
57629 +
57630 +/* Begin Data Structures */
57631 +
57632 +struct sprole_pw {
57633 + unsigned char *rolename;
57634 + unsigned char salt[GR_SALT_LEN];
57635 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57636 +};
57637 +
57638 +struct name_entry {
57639 + __u32 key;
57640 + ino_t inode;
57641 + dev_t device;
57642 + char *name;
57643 + __u16 len;
57644 + __u8 deleted;
57645 + struct name_entry *prev;
57646 + struct name_entry *next;
57647 +};
57648 +
57649 +struct inodev_entry {
57650 + struct name_entry *nentry;
57651 + struct inodev_entry *prev;
57652 + struct inodev_entry *next;
57653 +};
57654 +
57655 +struct acl_role_db {
57656 + struct acl_role_label **r_hash;
57657 + __u32 r_size;
57658 +};
57659 +
57660 +struct inodev_db {
57661 + struct inodev_entry **i_hash;
57662 + __u32 i_size;
57663 +};
57664 +
57665 +struct name_db {
57666 + struct name_entry **n_hash;
57667 + __u32 n_size;
57668 +};
57669 +
57670 +struct crash_uid {
57671 + uid_t uid;
57672 + unsigned long expires;
57673 +};
57674 +
57675 +struct gr_hash_struct {
57676 + void **table;
57677 + void **nametable;
57678 + void *first;
57679 + __u32 table_size;
57680 + __u32 used_size;
57681 + int type;
57682 +};
57683 +
57684 +/* Userspace Grsecurity ACL data structures */
57685 +
57686 +struct acl_subject_label {
57687 + char *filename;
57688 + ino_t inode;
57689 + dev_t device;
57690 + __u32 mode;
57691 + kernel_cap_t cap_mask;
57692 + kernel_cap_t cap_lower;
57693 + kernel_cap_t cap_invert_audit;
57694 +
57695 + struct rlimit res[GR_NLIMITS];
57696 + __u32 resmask;
57697 +
57698 + __u8 user_trans_type;
57699 + __u8 group_trans_type;
57700 + uid_t *user_transitions;
57701 + gid_t *group_transitions;
57702 + __u16 user_trans_num;
57703 + __u16 group_trans_num;
57704 +
57705 + __u32 sock_families[2];
57706 + __u32 ip_proto[8];
57707 + __u32 ip_type;
57708 + struct acl_ip_label **ips;
57709 + __u32 ip_num;
57710 + __u32 inaddr_any_override;
57711 +
57712 + __u32 crashes;
57713 + unsigned long expires;
57714 +
57715 + struct acl_subject_label *parent_subject;
57716 + struct gr_hash_struct *hash;
57717 + struct acl_subject_label *prev;
57718 + struct acl_subject_label *next;
57719 +
57720 + struct acl_object_label **obj_hash;
57721 + __u32 obj_hash_size;
57722 + __u16 pax_flags;
57723 +};
57724 +
57725 +struct role_allowed_ip {
57726 + __u32 addr;
57727 + __u32 netmask;
57728 +
57729 + struct role_allowed_ip *prev;
57730 + struct role_allowed_ip *next;
57731 +};
57732 +
57733 +struct role_transition {
57734 + char *rolename;
57735 +
57736 + struct role_transition *prev;
57737 + struct role_transition *next;
57738 +};
57739 +
57740 +struct acl_role_label {
57741 + char *rolename;
57742 + uid_t uidgid;
57743 + __u16 roletype;
57744 +
57745 + __u16 auth_attempts;
57746 + unsigned long expires;
57747 +
57748 + struct acl_subject_label *root_label;
57749 + struct gr_hash_struct *hash;
57750 +
57751 + struct acl_role_label *prev;
57752 + struct acl_role_label *next;
57753 +
57754 + struct role_transition *transitions;
57755 + struct role_allowed_ip *allowed_ips;
57756 + uid_t *domain_children;
57757 + __u16 domain_child_num;
57758 +
57759 + struct acl_subject_label **subj_hash;
57760 + __u32 subj_hash_size;
57761 +};
57762 +
57763 +struct user_acl_role_db {
57764 + struct acl_role_label **r_table;
57765 + __u32 num_pointers; /* Number of allocations to track */
57766 + __u32 num_roles; /* Number of roles */
57767 + __u32 num_domain_children; /* Number of domain children */
57768 + __u32 num_subjects; /* Number of subjects */
57769 + __u32 num_objects; /* Number of objects */
57770 +};
57771 +
57772 +struct acl_object_label {
57773 + char *filename;
57774 + ino_t inode;
57775 + dev_t device;
57776 + __u32 mode;
57777 +
57778 + struct acl_subject_label *nested;
57779 + struct acl_object_label *globbed;
57780 +
57781 + /* next two structures not used */
57782 +
57783 + struct acl_object_label *prev;
57784 + struct acl_object_label *next;
57785 +};
57786 +
57787 +struct acl_ip_label {
57788 + char *iface;
57789 + __u32 addr;
57790 + __u32 netmask;
57791 + __u16 low, high;
57792 + __u8 mode;
57793 + __u32 type;
57794 + __u32 proto[8];
57795 +
57796 + /* next two structures not used */
57797 +
57798 + struct acl_ip_label *prev;
57799 + struct acl_ip_label *next;
57800 +};
57801 +
57802 +struct gr_arg {
57803 + struct user_acl_role_db role_db;
57804 + unsigned char pw[GR_PW_LEN];
57805 + unsigned char salt[GR_SALT_LEN];
57806 + unsigned char sum[GR_SHA_LEN];
57807 + unsigned char sp_role[GR_SPROLE_LEN];
57808 + struct sprole_pw *sprole_pws;
57809 + dev_t segv_device;
57810 + ino_t segv_inode;
57811 + uid_t segv_uid;
57812 + __u16 num_sprole_pws;
57813 + __u16 mode;
57814 +};
57815 +
57816 +struct gr_arg_wrapper {
57817 + struct gr_arg *arg;
57818 + __u32 version;
57819 + __u32 size;
57820 +};
57821 +
57822 +struct subject_map {
57823 + struct acl_subject_label *user;
57824 + struct acl_subject_label *kernel;
57825 + struct subject_map *prev;
57826 + struct subject_map *next;
57827 +};
57828 +
57829 +struct acl_subj_map_db {
57830 + struct subject_map **s_hash;
57831 + __u32 s_size;
57832 +};
57833 +
57834 +/* End Data Structures Section */
57835 +
57836 +/* Hash functions generated by empirical testing by Brad Spengler
57837 + Makes good use of the low bits of the inode. Generally 0-1 times
57838 + in loop for successful match. 0-3 for unsuccessful match.
57839 + Shift/add algorithm with modulus of table size and an XOR*/
57840 +
57841 +static __inline__ unsigned int
57842 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
57843 +{
57844 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
57845 +}
57846 +
57847 + static __inline__ unsigned int
57848 +shash(const struct acl_subject_label *userp, const unsigned int sz)
57849 +{
57850 + return ((const unsigned long)userp % sz);
57851 +}
57852 +
57853 +static __inline__ unsigned int
57854 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
57855 +{
57856 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
57857 +}
57858 +
57859 +static __inline__ unsigned int
57860 +nhash(const char *name, const __u16 len, const unsigned int sz)
57861 +{
57862 + return full_name_hash((const unsigned char *)name, len) % sz;
57863 +}
57864 +
57865 +#define FOR_EACH_ROLE_START(role) \
57866 + role = role_list; \
57867 + while (role) {
57868 +
57869 +#define FOR_EACH_ROLE_END(role) \
57870 + role = role->prev; \
57871 + }
57872 +
57873 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
57874 + subj = NULL; \
57875 + iter = 0; \
57876 + while (iter < role->subj_hash_size) { \
57877 + if (subj == NULL) \
57878 + subj = role->subj_hash[iter]; \
57879 + if (subj == NULL) { \
57880 + iter++; \
57881 + continue; \
57882 + }
57883 +
57884 +#define FOR_EACH_SUBJECT_END(subj,iter) \
57885 + subj = subj->next; \
57886 + if (subj == NULL) \
57887 + iter++; \
57888 + }
57889 +
57890 +
57891 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
57892 + subj = role->hash->first; \
57893 + while (subj != NULL) {
57894 +
57895 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
57896 + subj = subj->next; \
57897 + }
57898 +
57899 +#endif
57900 +
57901 diff -urNp linux-3.0.7/include/linux/gralloc.h linux-3.0.7/include/linux/gralloc.h
57902 --- linux-3.0.7/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
57903 +++ linux-3.0.7/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
57904 @@ -0,0 +1,9 @@
57905 +#ifndef __GRALLOC_H
57906 +#define __GRALLOC_H
57907 +
57908 +void acl_free_all(void);
57909 +int acl_alloc_stack_init(unsigned long size);
57910 +void *acl_alloc(unsigned long len);
57911 +void *acl_alloc_num(unsigned long num, unsigned long len);
57912 +
57913 +#endif
57914 diff -urNp linux-3.0.7/include/linux/grdefs.h linux-3.0.7/include/linux/grdefs.h
57915 --- linux-3.0.7/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
57916 +++ linux-3.0.7/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
57917 @@ -0,0 +1,140 @@
57918 +#ifndef GRDEFS_H
57919 +#define GRDEFS_H
57920 +
57921 +/* Begin grsecurity status declarations */
57922 +
57923 +enum {
57924 + GR_READY = 0x01,
57925 + GR_STATUS_INIT = 0x00 // disabled state
57926 +};
57927 +
57928 +/* Begin ACL declarations */
57929 +
57930 +/* Role flags */
57931 +
57932 +enum {
57933 + GR_ROLE_USER = 0x0001,
57934 + GR_ROLE_GROUP = 0x0002,
57935 + GR_ROLE_DEFAULT = 0x0004,
57936 + GR_ROLE_SPECIAL = 0x0008,
57937 + GR_ROLE_AUTH = 0x0010,
57938 + GR_ROLE_NOPW = 0x0020,
57939 + GR_ROLE_GOD = 0x0040,
57940 + GR_ROLE_LEARN = 0x0080,
57941 + GR_ROLE_TPE = 0x0100,
57942 + GR_ROLE_DOMAIN = 0x0200,
57943 + GR_ROLE_PAM = 0x0400,
57944 + GR_ROLE_PERSIST = 0x0800
57945 +};
57946 +
57947 +/* ACL Subject and Object mode flags */
57948 +enum {
57949 + GR_DELETED = 0x80000000
57950 +};
57951 +
57952 +/* ACL Object-only mode flags */
57953 +enum {
57954 + GR_READ = 0x00000001,
57955 + GR_APPEND = 0x00000002,
57956 + GR_WRITE = 0x00000004,
57957 + GR_EXEC = 0x00000008,
57958 + GR_FIND = 0x00000010,
57959 + GR_INHERIT = 0x00000020,
57960 + GR_SETID = 0x00000040,
57961 + GR_CREATE = 0x00000080,
57962 + GR_DELETE = 0x00000100,
57963 + GR_LINK = 0x00000200,
57964 + GR_AUDIT_READ = 0x00000400,
57965 + GR_AUDIT_APPEND = 0x00000800,
57966 + GR_AUDIT_WRITE = 0x00001000,
57967 + GR_AUDIT_EXEC = 0x00002000,
57968 + GR_AUDIT_FIND = 0x00004000,
57969 + GR_AUDIT_INHERIT= 0x00008000,
57970 + GR_AUDIT_SETID = 0x00010000,
57971 + GR_AUDIT_CREATE = 0x00020000,
57972 + GR_AUDIT_DELETE = 0x00040000,
57973 + GR_AUDIT_LINK = 0x00080000,
57974 + GR_PTRACERD = 0x00100000,
57975 + GR_NOPTRACE = 0x00200000,
57976 + GR_SUPPRESS = 0x00400000,
57977 + GR_NOLEARN = 0x00800000,
57978 + GR_INIT_TRANSFER= 0x01000000
57979 +};
57980 +
57981 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
57982 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
57983 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
57984 +
57985 +/* ACL subject-only mode flags */
57986 +enum {
57987 + GR_KILL = 0x00000001,
57988 + GR_VIEW = 0x00000002,
57989 + GR_PROTECTED = 0x00000004,
57990 + GR_LEARN = 0x00000008,
57991 + GR_OVERRIDE = 0x00000010,
57992 + /* just a placeholder, this mode is only used in userspace */
57993 + GR_DUMMY = 0x00000020,
57994 + GR_PROTSHM = 0x00000040,
57995 + GR_KILLPROC = 0x00000080,
57996 + GR_KILLIPPROC = 0x00000100,
57997 + /* just a placeholder, this mode is only used in userspace */
57998 + GR_NOTROJAN = 0x00000200,
57999 + GR_PROTPROCFD = 0x00000400,
58000 + GR_PROCACCT = 0x00000800,
58001 + GR_RELAXPTRACE = 0x00001000,
58002 + GR_NESTED = 0x00002000,
58003 + GR_INHERITLEARN = 0x00004000,
58004 + GR_PROCFIND = 0x00008000,
58005 + GR_POVERRIDE = 0x00010000,
58006 + GR_KERNELAUTH = 0x00020000,
58007 + GR_ATSECURE = 0x00040000,
58008 + GR_SHMEXEC = 0x00080000
58009 +};
58010 +
58011 +enum {
58012 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58013 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58014 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58015 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58016 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58017 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58018 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58019 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58020 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58021 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58022 +};
58023 +
58024 +enum {
58025 + GR_ID_USER = 0x01,
58026 + GR_ID_GROUP = 0x02,
58027 +};
58028 +
58029 +enum {
58030 + GR_ID_ALLOW = 0x01,
58031 + GR_ID_DENY = 0x02,
58032 +};
58033 +
58034 +#define GR_CRASH_RES 31
58035 +#define GR_UIDTABLE_MAX 500
58036 +
58037 +/* begin resource learning section */
58038 +enum {
58039 + GR_RLIM_CPU_BUMP = 60,
58040 + GR_RLIM_FSIZE_BUMP = 50000,
58041 + GR_RLIM_DATA_BUMP = 10000,
58042 + GR_RLIM_STACK_BUMP = 1000,
58043 + GR_RLIM_CORE_BUMP = 10000,
58044 + GR_RLIM_RSS_BUMP = 500000,
58045 + GR_RLIM_NPROC_BUMP = 1,
58046 + GR_RLIM_NOFILE_BUMP = 5,
58047 + GR_RLIM_MEMLOCK_BUMP = 50000,
58048 + GR_RLIM_AS_BUMP = 500000,
58049 + GR_RLIM_LOCKS_BUMP = 2,
58050 + GR_RLIM_SIGPENDING_BUMP = 5,
58051 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58052 + GR_RLIM_NICE_BUMP = 1,
58053 + GR_RLIM_RTPRIO_BUMP = 1,
58054 + GR_RLIM_RTTIME_BUMP = 1000000
58055 +};
58056 +
58057 +#endif
58058 diff -urNp linux-3.0.7/include/linux/grinternal.h linux-3.0.7/include/linux/grinternal.h
58059 --- linux-3.0.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58060 +++ linux-3.0.7/include/linux/grinternal.h 2011-10-17 00:25:19.000000000 -0400
58061 @@ -0,0 +1,219 @@
58062 +#ifndef __GRINTERNAL_H
58063 +#define __GRINTERNAL_H
58064 +
58065 +#ifdef CONFIG_GRKERNSEC
58066 +
58067 +#include <linux/fs.h>
58068 +#include <linux/mnt_namespace.h>
58069 +#include <linux/nsproxy.h>
58070 +#include <linux/gracl.h>
58071 +#include <linux/grdefs.h>
58072 +#include <linux/grmsg.h>
58073 +
58074 +void gr_add_learn_entry(const char *fmt, ...)
58075 + __attribute__ ((format (printf, 1, 2)));
58076 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58077 + const struct vfsmount *mnt);
58078 +__u32 gr_check_create(const struct dentry *new_dentry,
58079 + const struct dentry *parent,
58080 + const struct vfsmount *mnt, const __u32 mode);
58081 +int gr_check_protected_task(const struct task_struct *task);
58082 +__u32 to_gr_audit(const __u32 reqmode);
58083 +int gr_set_acls(const int type);
58084 +int gr_apply_subject_to_task(struct task_struct *task);
58085 +int gr_acl_is_enabled(void);
58086 +char gr_roletype_to_char(void);
58087 +
58088 +void gr_handle_alertkill(struct task_struct *task);
58089 +char *gr_to_filename(const struct dentry *dentry,
58090 + const struct vfsmount *mnt);
58091 +char *gr_to_filename1(const struct dentry *dentry,
58092 + const struct vfsmount *mnt);
58093 +char *gr_to_filename2(const struct dentry *dentry,
58094 + const struct vfsmount *mnt);
58095 +char *gr_to_filename3(const struct dentry *dentry,
58096 + const struct vfsmount *mnt);
58097 +
58098 +extern int grsec_enable_harden_ptrace;
58099 +extern int grsec_enable_link;
58100 +extern int grsec_enable_fifo;
58101 +extern int grsec_enable_execve;
58102 +extern int grsec_enable_shm;
58103 +extern int grsec_enable_execlog;
58104 +extern int grsec_enable_signal;
58105 +extern int grsec_enable_audit_ptrace;
58106 +extern int grsec_enable_forkfail;
58107 +extern int grsec_enable_time;
58108 +extern int grsec_enable_rofs;
58109 +extern int grsec_enable_chroot_shmat;
58110 +extern int grsec_enable_chroot_mount;
58111 +extern int grsec_enable_chroot_double;
58112 +extern int grsec_enable_chroot_pivot;
58113 +extern int grsec_enable_chroot_chdir;
58114 +extern int grsec_enable_chroot_chmod;
58115 +extern int grsec_enable_chroot_mknod;
58116 +extern int grsec_enable_chroot_fchdir;
58117 +extern int grsec_enable_chroot_nice;
58118 +extern int grsec_enable_chroot_execlog;
58119 +extern int grsec_enable_chroot_caps;
58120 +extern int grsec_enable_chroot_sysctl;
58121 +extern int grsec_enable_chroot_unix;
58122 +extern int grsec_enable_tpe;
58123 +extern int grsec_tpe_gid;
58124 +extern int grsec_enable_tpe_all;
58125 +extern int grsec_enable_tpe_invert;
58126 +extern int grsec_enable_socket_all;
58127 +extern int grsec_socket_all_gid;
58128 +extern int grsec_enable_socket_client;
58129 +extern int grsec_socket_client_gid;
58130 +extern int grsec_enable_socket_server;
58131 +extern int grsec_socket_server_gid;
58132 +extern int grsec_audit_gid;
58133 +extern int grsec_enable_group;
58134 +extern int grsec_enable_audit_textrel;
58135 +extern int grsec_enable_log_rwxmaps;
58136 +extern int grsec_enable_mount;
58137 +extern int grsec_enable_chdir;
58138 +extern int grsec_resource_logging;
58139 +extern int grsec_enable_blackhole;
58140 +extern int grsec_lastack_retries;
58141 +extern int grsec_enable_brute;
58142 +extern int grsec_lock;
58143 +
58144 +extern spinlock_t grsec_alert_lock;
58145 +extern unsigned long grsec_alert_wtime;
58146 +extern unsigned long grsec_alert_fyet;
58147 +
58148 +extern spinlock_t grsec_audit_lock;
58149 +
58150 +extern rwlock_t grsec_exec_file_lock;
58151 +
58152 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58153 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58154 + (tsk)->exec_file->f_vfsmnt) : "/")
58155 +
58156 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58157 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58158 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58159 +
58160 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58161 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58162 + (tsk)->exec_file->f_vfsmnt) : "/")
58163 +
58164 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58165 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58166 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58167 +
58168 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58169 +
58170 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58171 +
58172 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58173 + (task)->pid, (cred)->uid, \
58174 + (cred)->euid, (cred)->gid, (cred)->egid, \
58175 + gr_parent_task_fullpath(task), \
58176 + (task)->real_parent->comm, (task)->real_parent->pid, \
58177 + (pcred)->uid, (pcred)->euid, \
58178 + (pcred)->gid, (pcred)->egid
58179 +
58180 +#define GR_CHROOT_CAPS {{ \
58181 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58182 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58183 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58184 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58185 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58186 + CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
58187 +
58188 +#define security_learn(normal_msg,args...) \
58189 +({ \
58190 + read_lock(&grsec_exec_file_lock); \
58191 + gr_add_learn_entry(normal_msg "\n", ## args); \
58192 + read_unlock(&grsec_exec_file_lock); \
58193 +})
58194 +
58195 +enum {
58196 + GR_DO_AUDIT,
58197 + GR_DONT_AUDIT,
58198 + /* used for non-audit messages that we shouldn't kill the task on */
58199 + GR_DONT_AUDIT_GOOD
58200 +};
58201 +
58202 +enum {
58203 + GR_TTYSNIFF,
58204 + GR_RBAC,
58205 + GR_RBAC_STR,
58206 + GR_STR_RBAC,
58207 + GR_RBAC_MODE2,
58208 + GR_RBAC_MODE3,
58209 + GR_FILENAME,
58210 + GR_SYSCTL_HIDDEN,
58211 + GR_NOARGS,
58212 + GR_ONE_INT,
58213 + GR_ONE_INT_TWO_STR,
58214 + GR_ONE_STR,
58215 + GR_STR_INT,
58216 + GR_TWO_STR_INT,
58217 + GR_TWO_INT,
58218 + GR_TWO_U64,
58219 + GR_THREE_INT,
58220 + GR_FIVE_INT_TWO_STR,
58221 + GR_TWO_STR,
58222 + GR_THREE_STR,
58223 + GR_FOUR_STR,
58224 + GR_STR_FILENAME,
58225 + GR_FILENAME_STR,
58226 + GR_FILENAME_TWO_INT,
58227 + GR_FILENAME_TWO_INT_STR,
58228 + GR_TEXTREL,
58229 + GR_PTRACE,
58230 + GR_RESOURCE,
58231 + GR_CAP,
58232 + GR_SIG,
58233 + GR_SIG2,
58234 + GR_CRASH1,
58235 + GR_CRASH2,
58236 + GR_PSACCT,
58237 + GR_RWXMAP
58238 +};
58239 +
58240 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58241 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58242 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58243 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58244 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58245 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58246 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58247 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58248 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58249 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58250 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58251 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58252 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58253 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58254 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58255 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58256 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58257 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58258 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58259 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58260 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58261 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58262 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58263 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58264 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58265 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58266 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58267 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58268 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58269 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58270 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58271 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58272 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58273 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58274 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58275 +
58276 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58277 +
58278 +#endif
58279 +
58280 +#endif
58281 diff -urNp linux-3.0.7/include/linux/grmsg.h linux-3.0.7/include/linux/grmsg.h
58282 --- linux-3.0.7/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58283 +++ linux-3.0.7/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
58284 @@ -0,0 +1,108 @@
58285 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58286 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58287 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58288 +#define GR_STOPMOD_MSG "denied modification of module state by "
58289 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58290 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58291 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58292 +#define GR_IOPL_MSG "denied use of iopl() by "
58293 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58294 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58295 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58296 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58297 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58298 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58299 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58300 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58301 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58302 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58303 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58304 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58305 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58306 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58307 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58308 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58309 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58310 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58311 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58312 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58313 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58314 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58315 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58316 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58317 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58318 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58319 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58320 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58321 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58322 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58323 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58324 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58325 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58326 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58327 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58328 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58329 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58330 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58331 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58332 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58333 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58334 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58335 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58336 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58337 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58338 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58339 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58340 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58341 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58342 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58343 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58344 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58345 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58346 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58347 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58348 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58349 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58350 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58351 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58352 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58353 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58354 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58355 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58356 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58357 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58358 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58359 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58360 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58361 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58362 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58363 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58364 +#define GR_TIME_MSG "time set by "
58365 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58366 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58367 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58368 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58369 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58370 +#define GR_BIND_MSG "denied bind() by "
58371 +#define GR_CONNECT_MSG "denied connect() by "
58372 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58373 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58374 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58375 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58376 +#define GR_CAP_ACL_MSG "use of %s denied for "
58377 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58378 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58379 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58380 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58381 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58382 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58383 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58384 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58385 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58386 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58387 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58388 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58389 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58390 +#define GR_VM86_MSG "denied use of vm86 by "
58391 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58392 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58393 diff -urNp linux-3.0.7/include/linux/grsecurity.h linux-3.0.7/include/linux/grsecurity.h
58394 --- linux-3.0.7/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58395 +++ linux-3.0.7/include/linux/grsecurity.h 2011-10-17 06:35:30.000000000 -0400
58396 @@ -0,0 +1,228 @@
58397 +#ifndef GR_SECURITY_H
58398 +#define GR_SECURITY_H
58399 +#include <linux/fs.h>
58400 +#include <linux/fs_struct.h>
58401 +#include <linux/binfmts.h>
58402 +#include <linux/gracl.h>
58403 +
58404 +/* notify of brain-dead configs */
58405 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58406 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58407 +#endif
58408 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58409 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58410 +#endif
58411 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58412 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58413 +#endif
58414 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58415 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58416 +#endif
58417 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58418 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58419 +#endif
58420 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58421 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58422 +#endif
58423 +
58424 +#include <linux/compat.h>
58425 +
58426 +struct user_arg_ptr {
58427 +#ifdef CONFIG_COMPAT
58428 + bool is_compat;
58429 +#endif
58430 + union {
58431 + const char __user *const __user *native;
58432 +#ifdef CONFIG_COMPAT
58433 + compat_uptr_t __user *compat;
58434 +#endif
58435 + } ptr;
58436 +};
58437 +
58438 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58439 +void gr_handle_brute_check(void);
58440 +void gr_handle_kernel_exploit(void);
58441 +int gr_process_user_ban(void);
58442 +
58443 +char gr_roletype_to_char(void);
58444 +
58445 +int gr_acl_enable_at_secure(void);
58446 +
58447 +int gr_check_user_change(int real, int effective, int fs);
58448 +int gr_check_group_change(int real, int effective, int fs);
58449 +
58450 +void gr_del_task_from_ip_table(struct task_struct *p);
58451 +
58452 +int gr_pid_is_chrooted(struct task_struct *p);
58453 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58454 +int gr_handle_chroot_nice(void);
58455 +int gr_handle_chroot_sysctl(const int op);
58456 +int gr_handle_chroot_setpriority(struct task_struct *p,
58457 + const int niceval);
58458 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58459 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58460 + const struct vfsmount *mnt);
58461 +void gr_handle_chroot_chdir(struct path *path);
58462 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58463 + const struct vfsmount *mnt, const int mode);
58464 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58465 + const struct vfsmount *mnt, const int mode);
58466 +int gr_handle_chroot_mount(const struct dentry *dentry,
58467 + const struct vfsmount *mnt,
58468 + const char *dev_name);
58469 +int gr_handle_chroot_pivot(void);
58470 +int gr_handle_chroot_unix(const pid_t pid);
58471 +
58472 +int gr_handle_rawio(const struct inode *inode);
58473 +
58474 +void gr_handle_ioperm(void);
58475 +void gr_handle_iopl(void);
58476 +
58477 +int gr_tpe_allow(const struct file *file);
58478 +
58479 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58480 +void gr_clear_chroot_entries(struct task_struct *task);
58481 +
58482 +void gr_log_forkfail(const int retval);
58483 +void gr_log_timechange(void);
58484 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58485 +void gr_log_chdir(const struct dentry *dentry,
58486 + const struct vfsmount *mnt);
58487 +void gr_log_chroot_exec(const struct dentry *dentry,
58488 + const struct vfsmount *mnt);
58489 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58490 +void gr_log_remount(const char *devname, const int retval);
58491 +void gr_log_unmount(const char *devname, const int retval);
58492 +void gr_log_mount(const char *from, const char *to, const int retval);
58493 +void gr_log_textrel(struct vm_area_struct *vma);
58494 +void gr_log_rwxmmap(struct file *file);
58495 +void gr_log_rwxmprotect(struct file *file);
58496 +
58497 +int gr_handle_follow_link(const struct inode *parent,
58498 + const struct inode *inode,
58499 + const struct dentry *dentry,
58500 + const struct vfsmount *mnt);
58501 +int gr_handle_fifo(const struct dentry *dentry,
58502 + const struct vfsmount *mnt,
58503 + const struct dentry *dir, const int flag,
58504 + const int acc_mode);
58505 +int gr_handle_hardlink(const struct dentry *dentry,
58506 + const struct vfsmount *mnt,
58507 + struct inode *inode,
58508 + const int mode, const char *to);
58509 +
58510 +int gr_is_capable(const int cap);
58511 +int gr_is_capable_nolog(const int cap);
58512 +void gr_learn_resource(const struct task_struct *task, const int limit,
58513 + const unsigned long wanted, const int gt);
58514 +void gr_copy_label(struct task_struct *tsk);
58515 +void gr_handle_crash(struct task_struct *task, const int sig);
58516 +int gr_handle_signal(const struct task_struct *p, const int sig);
58517 +int gr_check_crash_uid(const uid_t uid);
58518 +int gr_check_protected_task(const struct task_struct *task);
58519 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58520 +int gr_acl_handle_mmap(const struct file *file,
58521 + const unsigned long prot);
58522 +int gr_acl_handle_mprotect(const struct file *file,
58523 + const unsigned long prot);
58524 +int gr_check_hidden_task(const struct task_struct *tsk);
58525 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58526 + const struct vfsmount *mnt);
58527 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
58528 + const struct vfsmount *mnt);
58529 +__u32 gr_acl_handle_access(const struct dentry *dentry,
58530 + const struct vfsmount *mnt, const int fmode);
58531 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58532 + const struct vfsmount *mnt, mode_t mode);
58533 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58534 + const struct vfsmount *mnt, mode_t mode);
58535 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
58536 + const struct vfsmount *mnt);
58537 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58538 + const struct vfsmount *mnt);
58539 +int gr_handle_ptrace(struct task_struct *task, const long request);
58540 +int gr_handle_proc_ptrace(struct task_struct *task);
58541 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
58542 + const struct vfsmount *mnt);
58543 +int gr_check_crash_exec(const struct file *filp);
58544 +int gr_acl_is_enabled(void);
58545 +void gr_set_kernel_label(struct task_struct *task);
58546 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
58547 + const gid_t gid);
58548 +int gr_set_proc_label(const struct dentry *dentry,
58549 + const struct vfsmount *mnt,
58550 + const int unsafe_share);
58551 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58552 + const struct vfsmount *mnt);
58553 +__u32 gr_acl_handle_open(const struct dentry *dentry,
58554 + const struct vfsmount *mnt, const int fmode);
58555 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
58556 + const struct dentry *p_dentry,
58557 + const struct vfsmount *p_mnt, const int fmode,
58558 + const int imode);
58559 +void gr_handle_create(const struct dentry *dentry,
58560 + const struct vfsmount *mnt);
58561 +void gr_handle_proc_create(const struct dentry *dentry,
58562 + const struct inode *inode);
58563 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58564 + const struct dentry *parent_dentry,
58565 + const struct vfsmount *parent_mnt,
58566 + const int mode);
58567 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58568 + const struct dentry *parent_dentry,
58569 + const struct vfsmount *parent_mnt);
58570 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58571 + const struct vfsmount *mnt);
58572 +void gr_handle_delete(const ino_t ino, const dev_t dev);
58573 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58574 + const struct vfsmount *mnt);
58575 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58576 + const struct dentry *parent_dentry,
58577 + const struct vfsmount *parent_mnt,
58578 + const char *from);
58579 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58580 + const struct dentry *parent_dentry,
58581 + const struct vfsmount *parent_mnt,
58582 + const struct dentry *old_dentry,
58583 + const struct vfsmount *old_mnt, const char *to);
58584 +int gr_acl_handle_rename(struct dentry *new_dentry,
58585 + struct dentry *parent_dentry,
58586 + const struct vfsmount *parent_mnt,
58587 + struct dentry *old_dentry,
58588 + struct inode *old_parent_inode,
58589 + struct vfsmount *old_mnt, const char *newname);
58590 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58591 + struct dentry *old_dentry,
58592 + struct dentry *new_dentry,
58593 + struct vfsmount *mnt, const __u8 replace);
58594 +__u32 gr_check_link(const struct dentry *new_dentry,
58595 + const struct dentry *parent_dentry,
58596 + const struct vfsmount *parent_mnt,
58597 + const struct dentry *old_dentry,
58598 + const struct vfsmount *old_mnt);
58599 +int gr_acl_handle_filldir(const struct file *file, const char *name,
58600 + const unsigned int namelen, const ino_t ino);
58601 +
58602 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
58603 + const struct vfsmount *mnt);
58604 +void gr_acl_handle_exit(void);
58605 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
58606 +int gr_acl_handle_procpidmem(const struct task_struct *task);
58607 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58608 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58609 +void gr_audit_ptrace(struct task_struct *task);
58610 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58611 +
58612 +#ifdef CONFIG_GRKERNSEC
58613 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58614 +void gr_handle_vm86(void);
58615 +void gr_handle_mem_readwrite(u64 from, u64 to);
58616 +
58617 +extern int grsec_enable_dmesg;
58618 +extern int grsec_disable_privio;
58619 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58620 +extern int grsec_enable_chroot_findtask;
58621 +#endif
58622 +#endif
58623 +
58624 +#endif
58625 diff -urNp linux-3.0.7/include/linux/grsock.h linux-3.0.7/include/linux/grsock.h
58626 --- linux-3.0.7/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
58627 +++ linux-3.0.7/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
58628 @@ -0,0 +1,19 @@
58629 +#ifndef __GRSOCK_H
58630 +#define __GRSOCK_H
58631 +
58632 +extern void gr_attach_curr_ip(const struct sock *sk);
58633 +extern int gr_handle_sock_all(const int family, const int type,
58634 + const int protocol);
58635 +extern int gr_handle_sock_server(const struct sockaddr *sck);
58636 +extern int gr_handle_sock_server_other(const struct sock *sck);
58637 +extern int gr_handle_sock_client(const struct sockaddr *sck);
58638 +extern int gr_search_connect(struct socket * sock,
58639 + struct sockaddr_in * addr);
58640 +extern int gr_search_bind(struct socket * sock,
58641 + struct sockaddr_in * addr);
58642 +extern int gr_search_listen(struct socket * sock);
58643 +extern int gr_search_accept(struct socket * sock);
58644 +extern int gr_search_socket(const int domain, const int type,
58645 + const int protocol);
58646 +
58647 +#endif
58648 diff -urNp linux-3.0.7/include/linux/hid.h linux-3.0.7/include/linux/hid.h
58649 --- linux-3.0.7/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
58650 +++ linux-3.0.7/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
58651 @@ -675,7 +675,7 @@ struct hid_ll_driver {
58652 unsigned int code, int value);
58653
58654 int (*parse)(struct hid_device *hdev);
58655 -};
58656 +} __no_const;
58657
58658 #define PM_HINT_FULLON 1<<5
58659 #define PM_HINT_NORMAL 1<<1
58660 diff -urNp linux-3.0.7/include/linux/highmem.h linux-3.0.7/include/linux/highmem.h
58661 --- linux-3.0.7/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
58662 +++ linux-3.0.7/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
58663 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
58664 kunmap_atomic(kaddr, KM_USER0);
58665 }
58666
58667 +static inline void sanitize_highpage(struct page *page)
58668 +{
58669 + void *kaddr;
58670 + unsigned long flags;
58671 +
58672 + local_irq_save(flags);
58673 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
58674 + clear_page(kaddr);
58675 + kunmap_atomic(kaddr, KM_CLEARPAGE);
58676 + local_irq_restore(flags);
58677 +}
58678 +
58679 static inline void zero_user_segments(struct page *page,
58680 unsigned start1, unsigned end1,
58681 unsigned start2, unsigned end2)
58682 diff -urNp linux-3.0.7/include/linux/i2c.h linux-3.0.7/include/linux/i2c.h
58683 --- linux-3.0.7/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
58684 +++ linux-3.0.7/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
58685 @@ -346,6 +346,7 @@ struct i2c_algorithm {
58686 /* To determine what the adapter supports */
58687 u32 (*functionality) (struct i2c_adapter *);
58688 };
58689 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58690
58691 /*
58692 * i2c_adapter is the structure used to identify a physical i2c bus along
58693 diff -urNp linux-3.0.7/include/linux/i2o.h linux-3.0.7/include/linux/i2o.h
58694 --- linux-3.0.7/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
58695 +++ linux-3.0.7/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
58696 @@ -564,7 +564,7 @@ struct i2o_controller {
58697 struct i2o_device *exec; /* Executive */
58698 #if BITS_PER_LONG == 64
58699 spinlock_t context_list_lock; /* lock for context_list */
58700 - atomic_t context_list_counter; /* needed for unique contexts */
58701 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58702 struct list_head context_list; /* list of context id's
58703 and pointers */
58704 #endif
58705 diff -urNp linux-3.0.7/include/linux/init.h linux-3.0.7/include/linux/init.h
58706 --- linux-3.0.7/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
58707 +++ linux-3.0.7/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
58708 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
58709
58710 /* Each module must use one module_init(). */
58711 #define module_init(initfn) \
58712 - static inline initcall_t __inittest(void) \
58713 + static inline __used initcall_t __inittest(void) \
58714 { return initfn; } \
58715 int init_module(void) __attribute__((alias(#initfn)));
58716
58717 /* This is only required if you want to be unloadable. */
58718 #define module_exit(exitfn) \
58719 - static inline exitcall_t __exittest(void) \
58720 + static inline __used exitcall_t __exittest(void) \
58721 { return exitfn; } \
58722 void cleanup_module(void) __attribute__((alias(#exitfn)));
58723
58724 diff -urNp linux-3.0.7/include/linux/init_task.h linux-3.0.7/include/linux/init_task.h
58725 --- linux-3.0.7/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
58726 +++ linux-3.0.7/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
58727 @@ -126,6 +126,12 @@ extern struct cred init_cred;
58728 # define INIT_PERF_EVENTS(tsk)
58729 #endif
58730
58731 +#ifdef CONFIG_X86
58732 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58733 +#else
58734 +#define INIT_TASK_THREAD_INFO
58735 +#endif
58736 +
58737 /*
58738 * INIT_TASK is used to set up the first task table, touch at
58739 * your own risk!. Base=0, limit=0x1fffff (=2MB)
58740 @@ -164,6 +170,7 @@ extern struct cred init_cred;
58741 RCU_INIT_POINTER(.cred, &init_cred), \
58742 .comm = "swapper", \
58743 .thread = INIT_THREAD, \
58744 + INIT_TASK_THREAD_INFO \
58745 .fs = &init_fs, \
58746 .files = &init_files, \
58747 .signal = &init_signals, \
58748 diff -urNp linux-3.0.7/include/linux/intel-iommu.h linux-3.0.7/include/linux/intel-iommu.h
58749 --- linux-3.0.7/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
58750 +++ linux-3.0.7/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
58751 @@ -296,7 +296,7 @@ struct iommu_flush {
58752 u8 fm, u64 type);
58753 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58754 unsigned int size_order, u64 type);
58755 -};
58756 +} __no_const;
58757
58758 enum {
58759 SR_DMAR_FECTL_REG,
58760 diff -urNp linux-3.0.7/include/linux/interrupt.h linux-3.0.7/include/linux/interrupt.h
58761 --- linux-3.0.7/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
58762 +++ linux-3.0.7/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
58763 @@ -422,7 +422,7 @@ enum
58764 /* map softirq index to softirq name. update 'softirq_to_name' in
58765 * kernel/softirq.c when adding a new softirq.
58766 */
58767 -extern char *softirq_to_name[NR_SOFTIRQS];
58768 +extern const char * const softirq_to_name[NR_SOFTIRQS];
58769
58770 /* softirq mask and active fields moved to irq_cpustat_t in
58771 * asm/hardirq.h to get better cache usage. KAO
58772 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58773
58774 struct softirq_action
58775 {
58776 - void (*action)(struct softirq_action *);
58777 + void (*action)(void);
58778 };
58779
58780 asmlinkage void do_softirq(void);
58781 asmlinkage void __do_softirq(void);
58782 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58783 +extern void open_softirq(int nr, void (*action)(void));
58784 extern void softirq_init(void);
58785 static inline void __raise_softirq_irqoff(unsigned int nr)
58786 {
58787 diff -urNp linux-3.0.7/include/linux/kallsyms.h linux-3.0.7/include/linux/kallsyms.h
58788 --- linux-3.0.7/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
58789 +++ linux-3.0.7/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
58790 @@ -15,7 +15,8 @@
58791
58792 struct module;
58793
58794 -#ifdef CONFIG_KALLSYMS
58795 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58796 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58797 /* Lookup the address for a symbol. Returns 0 if not found. */
58798 unsigned long kallsyms_lookup_name(const char *name);
58799
58800 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
58801 /* Stupid that this does nothing, but I didn't create this mess. */
58802 #define __print_symbol(fmt, addr)
58803 #endif /*CONFIG_KALLSYMS*/
58804 +#else /* when included by kallsyms.c, vsnprintf.c, or
58805 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58806 +extern void __print_symbol(const char *fmt, unsigned long address);
58807 +extern int sprint_backtrace(char *buffer, unsigned long address);
58808 +extern int sprint_symbol(char *buffer, unsigned long address);
58809 +const char *kallsyms_lookup(unsigned long addr,
58810 + unsigned long *symbolsize,
58811 + unsigned long *offset,
58812 + char **modname, char *namebuf);
58813 +#endif
58814
58815 /* This macro allows us to keep printk typechecking */
58816 static void __check_printsym_format(const char *fmt, ...)
58817 diff -urNp linux-3.0.7/include/linux/kgdb.h linux-3.0.7/include/linux/kgdb.h
58818 --- linux-3.0.7/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
58819 +++ linux-3.0.7/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
58820 @@ -53,7 +53,7 @@ extern int kgdb_connected;
58821 extern int kgdb_io_module_registered;
58822
58823 extern atomic_t kgdb_setting_breakpoint;
58824 -extern atomic_t kgdb_cpu_doing_single_step;
58825 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
58826
58827 extern struct task_struct *kgdb_usethread;
58828 extern struct task_struct *kgdb_contthread;
58829 @@ -251,7 +251,7 @@ struct kgdb_arch {
58830 void (*disable_hw_break)(struct pt_regs *regs);
58831 void (*remove_all_hw_break)(void);
58832 void (*correct_hw_break)(void);
58833 -};
58834 +} __do_const;
58835
58836 /**
58837 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
58838 @@ -276,7 +276,7 @@ struct kgdb_io {
58839 void (*pre_exception) (void);
58840 void (*post_exception) (void);
58841 int is_console;
58842 -};
58843 +} __do_const;
58844
58845 extern struct kgdb_arch arch_kgdb_ops;
58846
58847 diff -urNp linux-3.0.7/include/linux/kmod.h linux-3.0.7/include/linux/kmod.h
58848 --- linux-3.0.7/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
58849 +++ linux-3.0.7/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
58850 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
58851 * usually useless though. */
58852 extern int __request_module(bool wait, const char *name, ...) \
58853 __attribute__((format(printf, 2, 3)));
58854 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
58855 + __attribute__((format(printf, 3, 4)));
58856 #define request_module(mod...) __request_module(true, mod)
58857 #define request_module_nowait(mod...) __request_module(false, mod)
58858 #define try_then_request_module(x, mod...) \
58859 diff -urNp linux-3.0.7/include/linux/kvm_host.h linux-3.0.7/include/linux/kvm_host.h
58860 --- linux-3.0.7/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
58861 +++ linux-3.0.7/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
58862 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
58863 void vcpu_load(struct kvm_vcpu *vcpu);
58864 void vcpu_put(struct kvm_vcpu *vcpu);
58865
58866 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58867 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58868 struct module *module);
58869 void kvm_exit(void);
58870
58871 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
58872 struct kvm_guest_debug *dbg);
58873 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
58874
58875 -int kvm_arch_init(void *opaque);
58876 +int kvm_arch_init(const void *opaque);
58877 void kvm_arch_exit(void);
58878
58879 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
58880 diff -urNp linux-3.0.7/include/linux/libata.h linux-3.0.7/include/linux/libata.h
58881 --- linux-3.0.7/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
58882 +++ linux-3.0.7/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
58883 @@ -899,7 +899,7 @@ struct ata_port_operations {
58884 * fields must be pointers.
58885 */
58886 const struct ata_port_operations *inherits;
58887 -};
58888 +} __do_const;
58889
58890 struct ata_port_info {
58891 unsigned long flags;
58892 diff -urNp linux-3.0.7/include/linux/linkage.h linux-3.0.7/include/linux/linkage.h
58893 --- linux-3.0.7/include/linux/linkage.h 2011-07-21 22:17:23.000000000 -0400
58894 +++ linux-3.0.7/include/linux/linkage.h 2011-10-11 10:44:33.000000000 -0400
58895 @@ -82,6 +82,7 @@
58896 */
58897 #ifndef ENDPROC
58898 #define ENDPROC(name) \
58899 + .size name, .-name; \
58900 .type name, @function; \
58901 END(name)
58902 #endif
58903 diff -urNp linux-3.0.7/include/linux/mca.h linux-3.0.7/include/linux/mca.h
58904 --- linux-3.0.7/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
58905 +++ linux-3.0.7/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
58906 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
58907 int region);
58908 void * (*mca_transform_memory)(struct mca_device *,
58909 void *memory);
58910 -};
58911 +} __no_const;
58912
58913 struct mca_bus {
58914 u64 default_dma_mask;
58915 diff -urNp linux-3.0.7/include/linux/memory.h linux-3.0.7/include/linux/memory.h
58916 --- linux-3.0.7/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
58917 +++ linux-3.0.7/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
58918 @@ -144,7 +144,7 @@ struct memory_accessor {
58919 size_t count);
58920 ssize_t (*write)(struct memory_accessor *, const char *buf,
58921 off_t offset, size_t count);
58922 -};
58923 +} __no_const;
58924
58925 /*
58926 * Kernel text modification mutex, used for code patching. Users of this lock
58927 diff -urNp linux-3.0.7/include/linux/mfd/abx500.h linux-3.0.7/include/linux/mfd/abx500.h
58928 --- linux-3.0.7/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
58929 +++ linux-3.0.7/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
58930 @@ -234,6 +234,7 @@ struct abx500_ops {
58931 int (*event_registers_startup_state_get) (struct device *, u8 *);
58932 int (*startup_irq_enabled) (struct device *, unsigned int);
58933 };
58934 +typedef struct abx500_ops __no_const abx500_ops_no_const;
58935
58936 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
58937 void abx500_remove_ops(struct device *dev);
58938 diff -urNp linux-3.0.7/include/linux/mm.h linux-3.0.7/include/linux/mm.h
58939 --- linux-3.0.7/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
58940 +++ linux-3.0.7/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
58941 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
58942
58943 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
58944 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
58945 +
58946 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
58947 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
58948 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
58949 +#else
58950 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
58951 +#endif
58952 +
58953 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
58954 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
58955
58956 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
58957 int set_page_dirty_lock(struct page *page);
58958 int clear_page_dirty_for_io(struct page *page);
58959
58960 -/* Is the vma a continuation of the stack vma above it? */
58961 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
58962 -{
58963 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
58964 -}
58965 -
58966 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
58967 - unsigned long addr)
58968 -{
58969 - return (vma->vm_flags & VM_GROWSDOWN) &&
58970 - (vma->vm_start == addr) &&
58971 - !vma_growsdown(vma->vm_prev, addr);
58972 -}
58973 -
58974 -/* Is the vma a continuation of the stack vma below it? */
58975 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
58976 -{
58977 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
58978 -}
58979 -
58980 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
58981 - unsigned long addr)
58982 -{
58983 - return (vma->vm_flags & VM_GROWSUP) &&
58984 - (vma->vm_end == addr) &&
58985 - !vma_growsup(vma->vm_next, addr);
58986 -}
58987 -
58988 extern unsigned long move_page_tables(struct vm_area_struct *vma,
58989 unsigned long old_addr, struct vm_area_struct *new_vma,
58990 unsigned long new_addr, unsigned long len);
58991 @@ -1169,6 +1148,15 @@ struct shrinker {
58992 extern void register_shrinker(struct shrinker *);
58993 extern void unregister_shrinker(struct shrinker *);
58994
58995 +#ifdef CONFIG_MMU
58996 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
58997 +#else
58998 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
58999 +{
59000 + return __pgprot(0);
59001 +}
59002 +#endif
59003 +
59004 int vma_wants_writenotify(struct vm_area_struct *vma);
59005
59006 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59007 @@ -1452,6 +1440,7 @@ out:
59008 }
59009
59010 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59011 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59012
59013 extern unsigned long do_brk(unsigned long, unsigned long);
59014
59015 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
59016 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59017 struct vm_area_struct **pprev);
59018
59019 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59020 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59021 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59022 +
59023 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59024 NULL if none. Assume start_addr < end_addr. */
59025 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59026 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
59027 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59028 }
59029
59030 -#ifdef CONFIG_MMU
59031 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59032 -#else
59033 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59034 -{
59035 - return __pgprot(0);
59036 -}
59037 -#endif
59038 -
59039 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59040 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59041 unsigned long pfn, unsigned long size, pgprot_t);
59042 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
59043 extern int sysctl_memory_failure_early_kill;
59044 extern int sysctl_memory_failure_recovery;
59045 extern void shake_page(struct page *p, int access);
59046 -extern atomic_long_t mce_bad_pages;
59047 +extern atomic_long_unchecked_t mce_bad_pages;
59048 extern int soft_offline_page(struct page *page, int flags);
59049
59050 extern void dump_page(struct page *page);
59051 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
59052 unsigned int pages_per_huge_page);
59053 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59054
59055 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59056 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59057 +#else
59058 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59059 +#endif
59060 +
59061 #endif /* __KERNEL__ */
59062 #endif /* _LINUX_MM_H */
59063 diff -urNp linux-3.0.7/include/linux/mm_types.h linux-3.0.7/include/linux/mm_types.h
59064 --- linux-3.0.7/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
59065 +++ linux-3.0.7/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
59066 @@ -184,6 +184,8 @@ struct vm_area_struct {
59067 #ifdef CONFIG_NUMA
59068 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59069 #endif
59070 +
59071 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59072 };
59073
59074 struct core_thread {
59075 @@ -316,6 +318,24 @@ struct mm_struct {
59076 #ifdef CONFIG_CPUMASK_OFFSTACK
59077 struct cpumask cpumask_allocation;
59078 #endif
59079 +
59080 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59081 + unsigned long pax_flags;
59082 +#endif
59083 +
59084 +#ifdef CONFIG_PAX_DLRESOLVE
59085 + unsigned long call_dl_resolve;
59086 +#endif
59087 +
59088 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59089 + unsigned long call_syscall;
59090 +#endif
59091 +
59092 +#ifdef CONFIG_PAX_ASLR
59093 + unsigned long delta_mmap; /* randomized offset */
59094 + unsigned long delta_stack; /* randomized offset */
59095 +#endif
59096 +
59097 };
59098
59099 static inline void mm_init_cpumask(struct mm_struct *mm)
59100 diff -urNp linux-3.0.7/include/linux/mmu_notifier.h linux-3.0.7/include/linux/mmu_notifier.h
59101 --- linux-3.0.7/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
59102 +++ linux-3.0.7/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
59103 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59104 */
59105 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59106 ({ \
59107 - pte_t __pte; \
59108 + pte_t ___pte; \
59109 struct vm_area_struct *___vma = __vma; \
59110 unsigned long ___address = __address; \
59111 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59112 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59113 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59114 - __pte; \
59115 + ___pte; \
59116 })
59117
59118 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59119 diff -urNp linux-3.0.7/include/linux/mmzone.h linux-3.0.7/include/linux/mmzone.h
59120 --- linux-3.0.7/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
59121 +++ linux-3.0.7/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
59122 @@ -350,7 +350,7 @@ struct zone {
59123 unsigned long flags; /* zone flags, see below */
59124
59125 /* Zone statistics */
59126 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59127 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59128
59129 /*
59130 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59131 diff -urNp linux-3.0.7/include/linux/mod_devicetable.h linux-3.0.7/include/linux/mod_devicetable.h
59132 --- linux-3.0.7/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
59133 +++ linux-3.0.7/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
59134 @@ -12,7 +12,7 @@
59135 typedef unsigned long kernel_ulong_t;
59136 #endif
59137
59138 -#define PCI_ANY_ID (~0)
59139 +#define PCI_ANY_ID ((__u16)~0)
59140
59141 struct pci_device_id {
59142 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59143 @@ -131,7 +131,7 @@ struct usb_device_id {
59144 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59145 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59146
59147 -#define HID_ANY_ID (~0)
59148 +#define HID_ANY_ID (~0U)
59149
59150 struct hid_device_id {
59151 __u16 bus;
59152 diff -urNp linux-3.0.7/include/linux/module.h linux-3.0.7/include/linux/module.h
59153 --- linux-3.0.7/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
59154 +++ linux-3.0.7/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
59155 @@ -16,6 +16,7 @@
59156 #include <linux/kobject.h>
59157 #include <linux/moduleparam.h>
59158 #include <linux/tracepoint.h>
59159 +#include <linux/fs.h>
59160
59161 #include <linux/percpu.h>
59162 #include <asm/module.h>
59163 @@ -325,19 +326,16 @@ struct module
59164 int (*init)(void);
59165
59166 /* If this is non-NULL, vfree after init() returns */
59167 - void *module_init;
59168 + void *module_init_rx, *module_init_rw;
59169
59170 /* Here is the actual code + data, vfree'd on unload. */
59171 - void *module_core;
59172 + void *module_core_rx, *module_core_rw;
59173
59174 /* Here are the sizes of the init and core sections */
59175 - unsigned int init_size, core_size;
59176 + unsigned int init_size_rw, core_size_rw;
59177
59178 /* The size of the executable code in each section. */
59179 - unsigned int init_text_size, core_text_size;
59180 -
59181 - /* Size of RO sections of the module (text+rodata) */
59182 - unsigned int init_ro_size, core_ro_size;
59183 + unsigned int init_size_rx, core_size_rx;
59184
59185 /* Arch-specific module values */
59186 struct mod_arch_specific arch;
59187 @@ -393,6 +391,10 @@ struct module
59188 #ifdef CONFIG_EVENT_TRACING
59189 struct ftrace_event_call **trace_events;
59190 unsigned int num_trace_events;
59191 + struct file_operations trace_id;
59192 + struct file_operations trace_enable;
59193 + struct file_operations trace_format;
59194 + struct file_operations trace_filter;
59195 #endif
59196 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59197 unsigned int num_ftrace_callsites;
59198 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
59199 bool is_module_percpu_address(unsigned long addr);
59200 bool is_module_text_address(unsigned long addr);
59201
59202 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59203 +{
59204 +
59205 +#ifdef CONFIG_PAX_KERNEXEC
59206 + if (ktla_ktva(addr) >= (unsigned long)start &&
59207 + ktla_ktva(addr) < (unsigned long)start + size)
59208 + return 1;
59209 +#endif
59210 +
59211 + return ((void *)addr >= start && (void *)addr < start + size);
59212 +}
59213 +
59214 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59215 +{
59216 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59217 +}
59218 +
59219 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59220 +{
59221 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59222 +}
59223 +
59224 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59225 +{
59226 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59227 +}
59228 +
59229 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59230 +{
59231 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59232 +}
59233 +
59234 static inline int within_module_core(unsigned long addr, struct module *mod)
59235 {
59236 - return (unsigned long)mod->module_core <= addr &&
59237 - addr < (unsigned long)mod->module_core + mod->core_size;
59238 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59239 }
59240
59241 static inline int within_module_init(unsigned long addr, struct module *mod)
59242 {
59243 - return (unsigned long)mod->module_init <= addr &&
59244 - addr < (unsigned long)mod->module_init + mod->init_size;
59245 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59246 }
59247
59248 /* Search for module by name: must hold module_mutex. */
59249 diff -urNp linux-3.0.7/include/linux/moduleloader.h linux-3.0.7/include/linux/moduleloader.h
59250 --- linux-3.0.7/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
59251 +++ linux-3.0.7/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
59252 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59253 sections. Returns NULL on failure. */
59254 void *module_alloc(unsigned long size);
59255
59256 +#ifdef CONFIG_PAX_KERNEXEC
59257 +void *module_alloc_exec(unsigned long size);
59258 +#else
59259 +#define module_alloc_exec(x) module_alloc(x)
59260 +#endif
59261 +
59262 /* Free memory returned from module_alloc. */
59263 void module_free(struct module *mod, void *module_region);
59264
59265 +#ifdef CONFIG_PAX_KERNEXEC
59266 +void module_free_exec(struct module *mod, void *module_region);
59267 +#else
59268 +#define module_free_exec(x, y) module_free((x), (y))
59269 +#endif
59270 +
59271 /* Apply the given relocation to the (simplified) ELF. Return -error
59272 or 0. */
59273 int apply_relocate(Elf_Shdr *sechdrs,
59274 diff -urNp linux-3.0.7/include/linux/moduleparam.h linux-3.0.7/include/linux/moduleparam.h
59275 --- linux-3.0.7/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
59276 +++ linux-3.0.7/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
59277 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59278 * @len is usually just sizeof(string).
59279 */
59280 #define module_param_string(name, string, len, perm) \
59281 - static const struct kparam_string __param_string_##name \
59282 + static const struct kparam_string __param_string_##name __used \
59283 = { len, string }; \
59284 __module_param_call(MODULE_PARAM_PREFIX, name, \
59285 &param_ops_string, \
59286 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59287 * module_param_named() for why this might be necessary.
59288 */
59289 #define module_param_array_named(name, array, type, nump, perm) \
59290 - static const struct kparam_array __param_arr_##name \
59291 + static const struct kparam_array __param_arr_##name __used \
59292 = { .max = ARRAY_SIZE(array), .num = nump, \
59293 .ops = &param_ops_##type, \
59294 .elemsize = sizeof(array[0]), .elem = array }; \
59295 diff -urNp linux-3.0.7/include/linux/namei.h linux-3.0.7/include/linux/namei.h
59296 --- linux-3.0.7/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
59297 +++ linux-3.0.7/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
59298 @@ -24,7 +24,7 @@ struct nameidata {
59299 unsigned seq;
59300 int last_type;
59301 unsigned depth;
59302 - char *saved_names[MAX_NESTED_LINKS + 1];
59303 + const char *saved_names[MAX_NESTED_LINKS + 1];
59304
59305 /* Intent data */
59306 union {
59307 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
59308 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59309 extern void unlock_rename(struct dentry *, struct dentry *);
59310
59311 -static inline void nd_set_link(struct nameidata *nd, char *path)
59312 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59313 {
59314 nd->saved_names[nd->depth] = path;
59315 }
59316
59317 -static inline char *nd_get_link(struct nameidata *nd)
59318 +static inline const char *nd_get_link(const struct nameidata *nd)
59319 {
59320 return nd->saved_names[nd->depth];
59321 }
59322 diff -urNp linux-3.0.7/include/linux/netdevice.h linux-3.0.7/include/linux/netdevice.h
59323 --- linux-3.0.7/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
59324 +++ linux-3.0.7/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
59325 @@ -979,6 +979,7 @@ struct net_device_ops {
59326 int (*ndo_set_features)(struct net_device *dev,
59327 u32 features);
59328 };
59329 +typedef struct net_device_ops __no_const net_device_ops_no_const;
59330
59331 /*
59332 * The DEVICE structure.
59333 diff -urNp linux-3.0.7/include/linux/netfilter/xt_gradm.h linux-3.0.7/include/linux/netfilter/xt_gradm.h
59334 --- linux-3.0.7/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59335 +++ linux-3.0.7/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
59336 @@ -0,0 +1,9 @@
59337 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59338 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59339 +
59340 +struct xt_gradm_mtinfo {
59341 + __u16 flags;
59342 + __u16 invflags;
59343 +};
59344 +
59345 +#endif
59346 diff -urNp linux-3.0.7/include/linux/of_pdt.h linux-3.0.7/include/linux/of_pdt.h
59347 --- linux-3.0.7/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
59348 +++ linux-3.0.7/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
59349 @@ -32,7 +32,7 @@ struct of_pdt_ops {
59350
59351 /* return 0 on success; fill in 'len' with number of bytes in path */
59352 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59353 -};
59354 +} __no_const;
59355
59356 extern void *prom_early_alloc(unsigned long size);
59357
59358 diff -urNp linux-3.0.7/include/linux/oprofile.h linux-3.0.7/include/linux/oprofile.h
59359 --- linux-3.0.7/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
59360 +++ linux-3.0.7/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
59361 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59362 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59363 char const * name, ulong * val);
59364
59365 -/** Create a file for read-only access to an atomic_t. */
59366 +/** Create a file for read-only access to an atomic_unchecked_t. */
59367 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59368 - char const * name, atomic_t * val);
59369 + char const * name, atomic_unchecked_t * val);
59370
59371 /** create a directory */
59372 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59373 diff -urNp linux-3.0.7/include/linux/padata.h linux-3.0.7/include/linux/padata.h
59374 --- linux-3.0.7/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
59375 +++ linux-3.0.7/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
59376 @@ -129,7 +129,7 @@ struct parallel_data {
59377 struct padata_instance *pinst;
59378 struct padata_parallel_queue __percpu *pqueue;
59379 struct padata_serial_queue __percpu *squeue;
59380 - atomic_t seq_nr;
59381 + atomic_unchecked_t seq_nr;
59382 atomic_t reorder_objects;
59383 atomic_t refcnt;
59384 unsigned int max_seq_nr;
59385 diff -urNp linux-3.0.7/include/linux/perf_event.h linux-3.0.7/include/linux/perf_event.h
59386 --- linux-3.0.7/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
59387 +++ linux-3.0.7/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
59388 @@ -761,8 +761,8 @@ struct perf_event {
59389
59390 enum perf_event_active_state state;
59391 unsigned int attach_state;
59392 - local64_t count;
59393 - atomic64_t child_count;
59394 + local64_t count; /* PaX: fix it one day */
59395 + atomic64_unchecked_t child_count;
59396
59397 /*
59398 * These are the total time in nanoseconds that the event
59399 @@ -813,8 +813,8 @@ struct perf_event {
59400 * These accumulate total time (in nanoseconds) that children
59401 * events have been enabled and running, respectively.
59402 */
59403 - atomic64_t child_total_time_enabled;
59404 - atomic64_t child_total_time_running;
59405 + atomic64_unchecked_t child_total_time_enabled;
59406 + atomic64_unchecked_t child_total_time_running;
59407
59408 /*
59409 * Protect attach/detach and child_list:
59410 diff -urNp linux-3.0.7/include/linux/pipe_fs_i.h linux-3.0.7/include/linux/pipe_fs_i.h
59411 --- linux-3.0.7/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
59412 +++ linux-3.0.7/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
59413 @@ -46,9 +46,9 @@ struct pipe_buffer {
59414 struct pipe_inode_info {
59415 wait_queue_head_t wait;
59416 unsigned int nrbufs, curbuf, buffers;
59417 - unsigned int readers;
59418 - unsigned int writers;
59419 - unsigned int waiting_writers;
59420 + atomic_t readers;
59421 + atomic_t writers;
59422 + atomic_t waiting_writers;
59423 unsigned int r_counter;
59424 unsigned int w_counter;
59425 struct page *tmp_page;
59426 diff -urNp linux-3.0.7/include/linux/pm_runtime.h linux-3.0.7/include/linux/pm_runtime.h
59427 --- linux-3.0.7/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
59428 +++ linux-3.0.7/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
59429 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
59430
59431 static inline void pm_runtime_mark_last_busy(struct device *dev)
59432 {
59433 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
59434 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59435 }
59436
59437 #else /* !CONFIG_PM_RUNTIME */
59438 diff -urNp linux-3.0.7/include/linux/poison.h linux-3.0.7/include/linux/poison.h
59439 --- linux-3.0.7/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
59440 +++ linux-3.0.7/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
59441 @@ -19,8 +19,8 @@
59442 * under normal circumstances, used to verify that nobody uses
59443 * non-initialized list entries.
59444 */
59445 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59446 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59447 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59448 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59449
59450 /********** include/linux/timer.h **********/
59451 /*
59452 diff -urNp linux-3.0.7/include/linux/preempt.h linux-3.0.7/include/linux/preempt.h
59453 --- linux-3.0.7/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
59454 +++ linux-3.0.7/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
59455 @@ -115,7 +115,7 @@ struct preempt_ops {
59456 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59457 void (*sched_out)(struct preempt_notifier *notifier,
59458 struct task_struct *next);
59459 -};
59460 +} __no_const;
59461
59462 /**
59463 * preempt_notifier - key for installing preemption notifiers
59464 diff -urNp linux-3.0.7/include/linux/proc_fs.h linux-3.0.7/include/linux/proc_fs.h
59465 --- linux-3.0.7/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
59466 +++ linux-3.0.7/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
59467 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59468 return proc_create_data(name, mode, parent, proc_fops, NULL);
59469 }
59470
59471 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59472 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59473 +{
59474 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59475 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59476 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59477 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59478 +#else
59479 + return proc_create_data(name, mode, parent, proc_fops, NULL);
59480 +#endif
59481 +}
59482 +
59483 +
59484 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59485 mode_t mode, struct proc_dir_entry *base,
59486 read_proc_t *read_proc, void * data)
59487 @@ -258,7 +271,7 @@ union proc_op {
59488 int (*proc_show)(struct seq_file *m,
59489 struct pid_namespace *ns, struct pid *pid,
59490 struct task_struct *task);
59491 -};
59492 +} __no_const;
59493
59494 struct ctl_table_header;
59495 struct ctl_table;
59496 diff -urNp linux-3.0.7/include/linux/ptrace.h linux-3.0.7/include/linux/ptrace.h
59497 --- linux-3.0.7/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
59498 +++ linux-3.0.7/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
59499 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
59500 extern void exit_ptrace(struct task_struct *tracer);
59501 #define PTRACE_MODE_READ 1
59502 #define PTRACE_MODE_ATTACH 2
59503 -/* Returns 0 on success, -errno on denial. */
59504 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59505 /* Returns true on success, false on denial. */
59506 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59507 +/* Returns true on success, false on denial. */
59508 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59509
59510 static inline int ptrace_reparented(struct task_struct *child)
59511 {
59512 diff -urNp linux-3.0.7/include/linux/random.h linux-3.0.7/include/linux/random.h
59513 --- linux-3.0.7/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
59514 +++ linux-3.0.7/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
59515 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
59516
59517 u32 prandom32(struct rnd_state *);
59518
59519 +static inline unsigned long pax_get_random_long(void)
59520 +{
59521 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59522 +}
59523 +
59524 /*
59525 * Handle minimum values for seeds
59526 */
59527 static inline u32 __seed(u32 x, u32 m)
59528 {
59529 - return (x < m) ? x + m : x;
59530 + return (x <= m) ? x + m + 1 : x;
59531 }
59532
59533 /**
59534 diff -urNp linux-3.0.7/include/linux/reboot.h linux-3.0.7/include/linux/reboot.h
59535 --- linux-3.0.7/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
59536 +++ linux-3.0.7/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
59537 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59538 * Architecture-specific implementations of sys_reboot commands.
59539 */
59540
59541 -extern void machine_restart(char *cmd);
59542 -extern void machine_halt(void);
59543 -extern void machine_power_off(void);
59544 +extern void machine_restart(char *cmd) __noreturn;
59545 +extern void machine_halt(void) __noreturn;
59546 +extern void machine_power_off(void) __noreturn;
59547
59548 extern void machine_shutdown(void);
59549 struct pt_regs;
59550 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59551 */
59552
59553 extern void kernel_restart_prepare(char *cmd);
59554 -extern void kernel_restart(char *cmd);
59555 -extern void kernel_halt(void);
59556 -extern void kernel_power_off(void);
59557 +extern void kernel_restart(char *cmd) __noreturn;
59558 +extern void kernel_halt(void) __noreturn;
59559 +extern void kernel_power_off(void) __noreturn;
59560
59561 extern int C_A_D; /* for sysctl */
59562 void ctrl_alt_del(void);
59563 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
59564 * Emergency restart, callable from an interrupt handler.
59565 */
59566
59567 -extern void emergency_restart(void);
59568 +extern void emergency_restart(void) __noreturn;
59569 #include <asm/emergency-restart.h>
59570
59571 #endif
59572 diff -urNp linux-3.0.7/include/linux/reiserfs_fs.h linux-3.0.7/include/linux/reiserfs_fs.h
59573 --- linux-3.0.7/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
59574 +++ linux-3.0.7/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
59575 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
59576 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59577
59578 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59579 -#define get_generation(s) atomic_read (&fs_generation(s))
59580 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59581 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59582 #define __fs_changed(gen,s) (gen != get_generation (s))
59583 #define fs_changed(gen,s) \
59584 diff -urNp linux-3.0.7/include/linux/reiserfs_fs_sb.h linux-3.0.7/include/linux/reiserfs_fs_sb.h
59585 --- linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
59586 +++ linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
59587 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59588 /* Comment? -Hans */
59589 wait_queue_head_t s_wait;
59590 /* To be obsoleted soon by per buffer seals.. -Hans */
59591 - atomic_t s_generation_counter; // increased by one every time the
59592 + atomic_unchecked_t s_generation_counter; // increased by one every time the
59593 // tree gets re-balanced
59594 unsigned long s_properties; /* File system properties. Currently holds
59595 on-disk FS format */
59596 diff -urNp linux-3.0.7/include/linux/relay.h linux-3.0.7/include/linux/relay.h
59597 --- linux-3.0.7/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
59598 +++ linux-3.0.7/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
59599 @@ -159,7 +159,7 @@ struct rchan_callbacks
59600 * The callback should return 0 if successful, negative if not.
59601 */
59602 int (*remove_buf_file)(struct dentry *dentry);
59603 -};
59604 +} __no_const;
59605
59606 /*
59607 * CONFIG_RELAY kernel API, kernel/relay.c
59608 diff -urNp linux-3.0.7/include/linux/rfkill.h linux-3.0.7/include/linux/rfkill.h
59609 --- linux-3.0.7/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
59610 +++ linux-3.0.7/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
59611 @@ -147,6 +147,7 @@ struct rfkill_ops {
59612 void (*query)(struct rfkill *rfkill, void *data);
59613 int (*set_block)(void *data, bool blocked);
59614 };
59615 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59616
59617 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59618 /**
59619 diff -urNp linux-3.0.7/include/linux/rmap.h linux-3.0.7/include/linux/rmap.h
59620 --- linux-3.0.7/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
59621 +++ linux-3.0.7/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
59622 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
59623 void anon_vma_init(void); /* create anon_vma_cachep */
59624 int anon_vma_prepare(struct vm_area_struct *);
59625 void unlink_anon_vmas(struct vm_area_struct *);
59626 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
59627 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
59628 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
59629 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
59630 void __anon_vma_link(struct vm_area_struct *);
59631
59632 static inline void anon_vma_merge(struct vm_area_struct *vma,
59633 diff -urNp linux-3.0.7/include/linux/sched.h linux-3.0.7/include/linux/sched.h
59634 --- linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:09.000000000 -0400
59635 +++ linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:19.000000000 -0400
59636 @@ -100,6 +100,7 @@ struct bio_list;
59637 struct fs_struct;
59638 struct perf_event_context;
59639 struct blk_plug;
59640 +struct linux_binprm;
59641
59642 /*
59643 * List of flags we want to share for kernel threads,
59644 @@ -380,10 +381,13 @@ struct user_namespace;
59645 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59646
59647 extern int sysctl_max_map_count;
59648 +extern unsigned long sysctl_heap_stack_gap;
59649
59650 #include <linux/aio.h>
59651
59652 #ifdef CONFIG_MMU
59653 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59654 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59655 extern void arch_pick_mmap_layout(struct mm_struct *mm);
59656 extern unsigned long
59657 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59658 @@ -629,6 +633,17 @@ struct signal_struct {
59659 #ifdef CONFIG_TASKSTATS
59660 struct taskstats *stats;
59661 #endif
59662 +
59663 +#ifdef CONFIG_GRKERNSEC
59664 + u32 curr_ip;
59665 + u32 saved_ip;
59666 + u32 gr_saddr;
59667 + u32 gr_daddr;
59668 + u16 gr_sport;
59669 + u16 gr_dport;
59670 + u8 used_accept:1;
59671 +#endif
59672 +
59673 #ifdef CONFIG_AUDIT
59674 unsigned audit_tty;
59675 struct tty_audit_buf *tty_audit_buf;
59676 @@ -710,6 +725,11 @@ struct user_struct {
59677 struct key *session_keyring; /* UID's default session keyring */
59678 #endif
59679
59680 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59681 + unsigned int banned;
59682 + unsigned long ban_expires;
59683 +#endif
59684 +
59685 /* Hash table maintenance information */
59686 struct hlist_node uidhash_node;
59687 uid_t uid;
59688 @@ -1340,8 +1360,8 @@ struct task_struct {
59689 struct list_head thread_group;
59690
59691 struct completion *vfork_done; /* for vfork() */
59692 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59693 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59694 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59695 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59696
59697 cputime_t utime, stime, utimescaled, stimescaled;
59698 cputime_t gtime;
59699 @@ -1357,13 +1377,6 @@ struct task_struct {
59700 struct task_cputime cputime_expires;
59701 struct list_head cpu_timers[3];
59702
59703 -/* process credentials */
59704 - const struct cred __rcu *real_cred; /* objective and real subjective task
59705 - * credentials (COW) */
59706 - const struct cred __rcu *cred; /* effective (overridable) subjective task
59707 - * credentials (COW) */
59708 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59709 -
59710 char comm[TASK_COMM_LEN]; /* executable name excluding path
59711 - access with [gs]et_task_comm (which lock
59712 it with task_lock())
59713 @@ -1380,8 +1393,16 @@ struct task_struct {
59714 #endif
59715 /* CPU-specific state of this task */
59716 struct thread_struct thread;
59717 +/* thread_info moved to task_struct */
59718 +#ifdef CONFIG_X86
59719 + struct thread_info tinfo;
59720 +#endif
59721 /* filesystem information */
59722 struct fs_struct *fs;
59723 +
59724 + const struct cred __rcu *cred; /* effective (overridable) subjective task
59725 + * credentials (COW) */
59726 +
59727 /* open file information */
59728 struct files_struct *files;
59729 /* namespaces */
59730 @@ -1428,6 +1449,11 @@ struct task_struct {
59731 struct rt_mutex_waiter *pi_blocked_on;
59732 #endif
59733
59734 +/* process credentials */
59735 + const struct cred __rcu *real_cred; /* objective and real subjective task
59736 + * credentials (COW) */
59737 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59738 +
59739 #ifdef CONFIG_DEBUG_MUTEXES
59740 /* mutex deadlock detection */
59741 struct mutex_waiter *blocked_on;
59742 @@ -1538,6 +1564,21 @@ struct task_struct {
59743 unsigned long default_timer_slack_ns;
59744
59745 struct list_head *scm_work_list;
59746 +
59747 +#ifdef CONFIG_GRKERNSEC
59748 + /* grsecurity */
59749 + struct dentry *gr_chroot_dentry;
59750 + struct acl_subject_label *acl;
59751 + struct acl_role_label *role;
59752 + struct file *exec_file;
59753 + u16 acl_role_id;
59754 + /* is this the task that authenticated to the special role */
59755 + u8 acl_sp_role;
59756 + u8 is_writable;
59757 + u8 brute;
59758 + u8 gr_is_chrooted;
59759 +#endif
59760 +
59761 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
59762 /* Index of current stored address in ret_stack */
59763 int curr_ret_stack;
59764 @@ -1572,6 +1613,57 @@ struct task_struct {
59765 #endif
59766 };
59767
59768 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
59769 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
59770 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
59771 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
59772 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
59773 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
59774 +
59775 +#ifdef CONFIG_PAX_SOFTMODE
59776 +extern int pax_softmode;
59777 +#endif
59778 +
59779 +extern int pax_check_flags(unsigned long *);
59780 +
59781 +/* if tsk != current then task_lock must be held on it */
59782 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59783 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
59784 +{
59785 + if (likely(tsk->mm))
59786 + return tsk->mm->pax_flags;
59787 + else
59788 + return 0UL;
59789 +}
59790 +
59791 +/* if tsk != current then task_lock must be held on it */
59792 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
59793 +{
59794 + if (likely(tsk->mm)) {
59795 + tsk->mm->pax_flags = flags;
59796 + return 0;
59797 + }
59798 + return -EINVAL;
59799 +}
59800 +#endif
59801 +
59802 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59803 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
59804 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59805 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
59806 +#endif
59807 +
59808 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
59809 +extern void pax_report_insns(void *pc, void *sp);
59810 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
59811 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
59812 +
59813 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
59814 +extern void pax_track_stack(void);
59815 +#else
59816 +static inline void pax_track_stack(void) {}
59817 +#endif
59818 +
59819 /* Future-safe accessor for struct task_struct's cpus_allowed. */
59820 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
59821
59822 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
59823 #define PF_DUMPCORE 0x00000200 /* dumped core */
59824 #define PF_SIGNALED 0x00000400 /* killed by a signal */
59825 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
59826 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
59827 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
59828 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
59829 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
59830 @@ -2055,7 +2148,9 @@ void yield(void);
59831 extern struct exec_domain default_exec_domain;
59832
59833 union thread_union {
59834 +#ifndef CONFIG_X86
59835 struct thread_info thread_info;
59836 +#endif
59837 unsigned long stack[THREAD_SIZE/sizeof(long)];
59838 };
59839
59840 @@ -2088,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
59841 */
59842
59843 extern struct task_struct *find_task_by_vpid(pid_t nr);
59844 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
59845 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
59846 struct pid_namespace *ns);
59847
59848 @@ -2224,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
59849 extern void exit_itimers(struct signal_struct *);
59850 extern void flush_itimer_signals(void);
59851
59852 -extern NORET_TYPE void do_group_exit(int);
59853 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
59854
59855 extern void daemonize(const char *, ...);
59856 extern int allow_signal(int);
59857 @@ -2392,13 +2488,17 @@ static inline unsigned long *end_of_stac
59858
59859 #endif
59860
59861 -static inline int object_is_on_stack(void *obj)
59862 +static inline int object_starts_on_stack(void *obj)
59863 {
59864 - void *stack = task_stack_page(current);
59865 + const void *stack = task_stack_page(current);
59866
59867 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
59868 }
59869
59870 +#ifdef CONFIG_PAX_USERCOPY
59871 +extern int object_is_on_stack(const void *obj, unsigned long len);
59872 +#endif
59873 +
59874 extern void thread_info_cache_init(void);
59875
59876 #ifdef CONFIG_DEBUG_STACK_USAGE
59877 diff -urNp linux-3.0.7/include/linux/screen_info.h linux-3.0.7/include/linux/screen_info.h
59878 --- linux-3.0.7/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
59879 +++ linux-3.0.7/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
59880 @@ -43,7 +43,8 @@ struct screen_info {
59881 __u16 pages; /* 0x32 */
59882 __u16 vesa_attributes; /* 0x34 */
59883 __u32 capabilities; /* 0x36 */
59884 - __u8 _reserved[6]; /* 0x3a */
59885 + __u16 vesapm_size; /* 0x3a */
59886 + __u8 _reserved[4]; /* 0x3c */
59887 } __attribute__((packed));
59888
59889 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
59890 diff -urNp linux-3.0.7/include/linux/security.h linux-3.0.7/include/linux/security.h
59891 --- linux-3.0.7/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
59892 +++ linux-3.0.7/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
59893 @@ -36,6 +36,7 @@
59894 #include <linux/key.h>
59895 #include <linux/xfrm.h>
59896 #include <linux/slab.h>
59897 +#include <linux/grsecurity.h>
59898 #include <net/flow.h>
59899
59900 /* Maximum number of letters for an LSM name string */
59901 diff -urNp linux-3.0.7/include/linux/seq_file.h linux-3.0.7/include/linux/seq_file.h
59902 --- linux-3.0.7/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
59903 +++ linux-3.0.7/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
59904 @@ -32,6 +32,7 @@ struct seq_operations {
59905 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
59906 int (*show) (struct seq_file *m, void *v);
59907 };
59908 +typedef struct seq_operations __no_const seq_operations_no_const;
59909
59910 #define SEQ_SKIP 1
59911
59912 diff -urNp linux-3.0.7/include/linux/shmem_fs.h linux-3.0.7/include/linux/shmem_fs.h
59913 --- linux-3.0.7/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
59914 +++ linux-3.0.7/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
59915 @@ -10,7 +10,7 @@
59916
59917 #define SHMEM_NR_DIRECT 16
59918
59919 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
59920 +#define SHMEM_SYMLINK_INLINE_LEN 64
59921
59922 struct shmem_inode_info {
59923 spinlock_t lock;
59924 diff -urNp linux-3.0.7/include/linux/shm.h linux-3.0.7/include/linux/shm.h
59925 --- linux-3.0.7/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
59926 +++ linux-3.0.7/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
59927 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
59928 pid_t shm_cprid;
59929 pid_t shm_lprid;
59930 struct user_struct *mlock_user;
59931 +#ifdef CONFIG_GRKERNSEC
59932 + time_t shm_createtime;
59933 + pid_t shm_lapid;
59934 +#endif
59935 };
59936
59937 /* shm_mode upper byte flags */
59938 diff -urNp linux-3.0.7/include/linux/skbuff.h linux-3.0.7/include/linux/skbuff.h
59939 --- linux-3.0.7/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
59940 +++ linux-3.0.7/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
59941 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
59942 */
59943 static inline int skb_queue_empty(const struct sk_buff_head *list)
59944 {
59945 - return list->next == (struct sk_buff *)list;
59946 + return list->next == (const struct sk_buff *)list;
59947 }
59948
59949 /**
59950 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
59951 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
59952 const struct sk_buff *skb)
59953 {
59954 - return skb->next == (struct sk_buff *)list;
59955 + return skb->next == (const struct sk_buff *)list;
59956 }
59957
59958 /**
59959 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
59960 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
59961 const struct sk_buff *skb)
59962 {
59963 - return skb->prev == (struct sk_buff *)list;
59964 + return skb->prev == (const struct sk_buff *)list;
59965 }
59966
59967 /**
59968 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
59969 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
59970 */
59971 #ifndef NET_SKB_PAD
59972 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
59973 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
59974 #endif
59975
59976 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
59977 diff -urNp linux-3.0.7/include/linux/slab_def.h linux-3.0.7/include/linux/slab_def.h
59978 --- linux-3.0.7/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
59979 +++ linux-3.0.7/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
59980 @@ -96,10 +96,10 @@ struct kmem_cache {
59981 unsigned long node_allocs;
59982 unsigned long node_frees;
59983 unsigned long node_overflow;
59984 - atomic_t allochit;
59985 - atomic_t allocmiss;
59986 - atomic_t freehit;
59987 - atomic_t freemiss;
59988 + atomic_unchecked_t allochit;
59989 + atomic_unchecked_t allocmiss;
59990 + atomic_unchecked_t freehit;
59991 + atomic_unchecked_t freemiss;
59992
59993 /*
59994 * If debugging is enabled, then the allocator can add additional
59995 diff -urNp linux-3.0.7/include/linux/slab.h linux-3.0.7/include/linux/slab.h
59996 --- linux-3.0.7/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
59997 +++ linux-3.0.7/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
59998 @@ -11,12 +11,20 @@
59999
60000 #include <linux/gfp.h>
60001 #include <linux/types.h>
60002 +#include <linux/err.h>
60003
60004 /*
60005 * Flags to pass to kmem_cache_create().
60006 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60007 */
60008 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60009 +
60010 +#ifdef CONFIG_PAX_USERCOPY
60011 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60012 +#else
60013 +#define SLAB_USERCOPY 0x00000000UL
60014 +#endif
60015 +
60016 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60017 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60018 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60019 @@ -87,10 +95,13 @@
60020 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60021 * Both make kfree a no-op.
60022 */
60023 -#define ZERO_SIZE_PTR ((void *)16)
60024 +#define ZERO_SIZE_PTR \
60025 +({ \
60026 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60027 + (void *)(-MAX_ERRNO-1L); \
60028 +})
60029
60030 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60031 - (unsigned long)ZERO_SIZE_PTR)
60032 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60033
60034 /*
60035 * struct kmem_cache related prototypes
60036 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
60037 void kfree(const void *);
60038 void kzfree(const void *);
60039 size_t ksize(const void *);
60040 +void check_object_size(const void *ptr, unsigned long n, bool to);
60041
60042 /*
60043 * Allocator specific definitions. These are mainly used to establish optimized
60044 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
60045
60046 void __init kmem_cache_init_late(void);
60047
60048 +#define kmalloc(x, y) \
60049 +({ \
60050 + void *___retval; \
60051 + intoverflow_t ___x = (intoverflow_t)x; \
60052 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60053 + ___retval = NULL; \
60054 + else \
60055 + ___retval = kmalloc((size_t)___x, (y)); \
60056 + ___retval; \
60057 +})
60058 +
60059 +#define kmalloc_node(x, y, z) \
60060 +({ \
60061 + void *___retval; \
60062 + intoverflow_t ___x = (intoverflow_t)x; \
60063 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60064 + ___retval = NULL; \
60065 + else \
60066 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60067 + ___retval; \
60068 +})
60069 +
60070 +#define kzalloc(x, y) \
60071 +({ \
60072 + void *___retval; \
60073 + intoverflow_t ___x = (intoverflow_t)x; \
60074 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60075 + ___retval = NULL; \
60076 + else \
60077 + ___retval = kzalloc((size_t)___x, (y)); \
60078 + ___retval; \
60079 +})
60080 +
60081 +#define __krealloc(x, y, z) \
60082 +({ \
60083 + void *___retval; \
60084 + intoverflow_t ___y = (intoverflow_t)y; \
60085 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60086 + ___retval = NULL; \
60087 + else \
60088 + ___retval = __krealloc((x), (size_t)___y, (z)); \
60089 + ___retval; \
60090 +})
60091 +
60092 +#define krealloc(x, y, z) \
60093 +({ \
60094 + void *___retval; \
60095 + intoverflow_t ___y = (intoverflow_t)y; \
60096 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60097 + ___retval = NULL; \
60098 + else \
60099 + ___retval = krealloc((x), (size_t)___y, (z)); \
60100 + ___retval; \
60101 +})
60102 +
60103 #endif /* _LINUX_SLAB_H */
60104 diff -urNp linux-3.0.7/include/linux/slub_def.h linux-3.0.7/include/linux/slub_def.h
60105 --- linux-3.0.7/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
60106 +++ linux-3.0.7/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
60107 @@ -82,7 +82,7 @@ struct kmem_cache {
60108 struct kmem_cache_order_objects max;
60109 struct kmem_cache_order_objects min;
60110 gfp_t allocflags; /* gfp flags to use on each alloc */
60111 - int refcount; /* Refcount for slab cache destroy */
60112 + atomic_t refcount; /* Refcount for slab cache destroy */
60113 void (*ctor)(void *);
60114 int inuse; /* Offset to metadata */
60115 int align; /* Alignment */
60116 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
60117 }
60118
60119 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60120 -void *__kmalloc(size_t size, gfp_t flags);
60121 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60122
60123 static __always_inline void *
60124 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60125 diff -urNp linux-3.0.7/include/linux/sonet.h linux-3.0.7/include/linux/sonet.h
60126 --- linux-3.0.7/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
60127 +++ linux-3.0.7/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
60128 @@ -61,7 +61,7 @@ struct sonet_stats {
60129 #include <asm/atomic.h>
60130
60131 struct k_sonet_stats {
60132 -#define __HANDLE_ITEM(i) atomic_t i
60133 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60134 __SONET_ITEMS
60135 #undef __HANDLE_ITEM
60136 };
60137 diff -urNp linux-3.0.7/include/linux/sunrpc/clnt.h linux-3.0.7/include/linux/sunrpc/clnt.h
60138 --- linux-3.0.7/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
60139 +++ linux-3.0.7/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
60140 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60141 {
60142 switch (sap->sa_family) {
60143 case AF_INET:
60144 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60145 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60146 case AF_INET6:
60147 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60148 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60149 }
60150 return 0;
60151 }
60152 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60153 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60154 const struct sockaddr *src)
60155 {
60156 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60157 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60158 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60159
60160 dsin->sin_family = ssin->sin_family;
60161 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60162 if (sa->sa_family != AF_INET6)
60163 return 0;
60164
60165 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60166 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60167 }
60168
60169 #endif /* __KERNEL__ */
60170 diff -urNp linux-3.0.7/include/linux/sunrpc/svc_rdma.h linux-3.0.7/include/linux/sunrpc/svc_rdma.h
60171 --- linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
60172 +++ linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
60173 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60174 extern unsigned int svcrdma_max_requests;
60175 extern unsigned int svcrdma_max_req_size;
60176
60177 -extern atomic_t rdma_stat_recv;
60178 -extern atomic_t rdma_stat_read;
60179 -extern atomic_t rdma_stat_write;
60180 -extern atomic_t rdma_stat_sq_starve;
60181 -extern atomic_t rdma_stat_rq_starve;
60182 -extern atomic_t rdma_stat_rq_poll;
60183 -extern atomic_t rdma_stat_rq_prod;
60184 -extern atomic_t rdma_stat_sq_poll;
60185 -extern atomic_t rdma_stat_sq_prod;
60186 +extern atomic_unchecked_t rdma_stat_recv;
60187 +extern atomic_unchecked_t rdma_stat_read;
60188 +extern atomic_unchecked_t rdma_stat_write;
60189 +extern atomic_unchecked_t rdma_stat_sq_starve;
60190 +extern atomic_unchecked_t rdma_stat_rq_starve;
60191 +extern atomic_unchecked_t rdma_stat_rq_poll;
60192 +extern atomic_unchecked_t rdma_stat_rq_prod;
60193 +extern atomic_unchecked_t rdma_stat_sq_poll;
60194 +extern atomic_unchecked_t rdma_stat_sq_prod;
60195
60196 #define RPCRDMA_VERSION 1
60197
60198 diff -urNp linux-3.0.7/include/linux/sysctl.h linux-3.0.7/include/linux/sysctl.h
60199 --- linux-3.0.7/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
60200 +++ linux-3.0.7/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
60201 @@ -155,7 +155,11 @@ enum
60202 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60203 };
60204
60205 -
60206 +#ifdef CONFIG_PAX_SOFTMODE
60207 +enum {
60208 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60209 +};
60210 +#endif
60211
60212 /* CTL_VM names: */
60213 enum
60214 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60215
60216 extern int proc_dostring(struct ctl_table *, int,
60217 void __user *, size_t *, loff_t *);
60218 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60219 + void __user *, size_t *, loff_t *);
60220 extern int proc_dointvec(struct ctl_table *, int,
60221 void __user *, size_t *, loff_t *);
60222 extern int proc_dointvec_minmax(struct ctl_table *, int,
60223 diff -urNp linux-3.0.7/include/linux/tty_ldisc.h linux-3.0.7/include/linux/tty_ldisc.h
60224 --- linux-3.0.7/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
60225 +++ linux-3.0.7/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
60226 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60227
60228 struct module *owner;
60229
60230 - int refcount;
60231 + atomic_t refcount;
60232 };
60233
60234 struct tty_ldisc {
60235 diff -urNp linux-3.0.7/include/linux/types.h linux-3.0.7/include/linux/types.h
60236 --- linux-3.0.7/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
60237 +++ linux-3.0.7/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
60238 @@ -213,10 +213,26 @@ typedef struct {
60239 int counter;
60240 } atomic_t;
60241
60242 +#ifdef CONFIG_PAX_REFCOUNT
60243 +typedef struct {
60244 + int counter;
60245 +} atomic_unchecked_t;
60246 +#else
60247 +typedef atomic_t atomic_unchecked_t;
60248 +#endif
60249 +
60250 #ifdef CONFIG_64BIT
60251 typedef struct {
60252 long counter;
60253 } atomic64_t;
60254 +
60255 +#ifdef CONFIG_PAX_REFCOUNT
60256 +typedef struct {
60257 + long counter;
60258 +} atomic64_unchecked_t;
60259 +#else
60260 +typedef atomic64_t atomic64_unchecked_t;
60261 +#endif
60262 #endif
60263
60264 struct list_head {
60265 diff -urNp linux-3.0.7/include/linux/uaccess.h linux-3.0.7/include/linux/uaccess.h
60266 --- linux-3.0.7/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
60267 +++ linux-3.0.7/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
60268 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60269 long ret; \
60270 mm_segment_t old_fs = get_fs(); \
60271 \
60272 - set_fs(KERNEL_DS); \
60273 pagefault_disable(); \
60274 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60275 - pagefault_enable(); \
60276 + set_fs(KERNEL_DS); \
60277 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60278 set_fs(old_fs); \
60279 + pagefault_enable(); \
60280 ret; \
60281 })
60282
60283 diff -urNp linux-3.0.7/include/linux/unaligned/access_ok.h linux-3.0.7/include/linux/unaligned/access_ok.h
60284 --- linux-3.0.7/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
60285 +++ linux-3.0.7/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
60286 @@ -6,32 +6,32 @@
60287
60288 static inline u16 get_unaligned_le16(const void *p)
60289 {
60290 - return le16_to_cpup((__le16 *)p);
60291 + return le16_to_cpup((const __le16 *)p);
60292 }
60293
60294 static inline u32 get_unaligned_le32(const void *p)
60295 {
60296 - return le32_to_cpup((__le32 *)p);
60297 + return le32_to_cpup((const __le32 *)p);
60298 }
60299
60300 static inline u64 get_unaligned_le64(const void *p)
60301 {
60302 - return le64_to_cpup((__le64 *)p);
60303 + return le64_to_cpup((const __le64 *)p);
60304 }
60305
60306 static inline u16 get_unaligned_be16(const void *p)
60307 {
60308 - return be16_to_cpup((__be16 *)p);
60309 + return be16_to_cpup((const __be16 *)p);
60310 }
60311
60312 static inline u32 get_unaligned_be32(const void *p)
60313 {
60314 - return be32_to_cpup((__be32 *)p);
60315 + return be32_to_cpup((const __be32 *)p);
60316 }
60317
60318 static inline u64 get_unaligned_be64(const void *p)
60319 {
60320 - return be64_to_cpup((__be64 *)p);
60321 + return be64_to_cpup((const __be64 *)p);
60322 }
60323
60324 static inline void put_unaligned_le16(u16 val, void *p)
60325 diff -urNp linux-3.0.7/include/linux/vermagic.h linux-3.0.7/include/linux/vermagic.h
60326 --- linux-3.0.7/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
60327 +++ linux-3.0.7/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
60328 @@ -26,9 +26,28 @@
60329 #define MODULE_ARCH_VERMAGIC ""
60330 #endif
60331
60332 +#ifdef CONFIG_PAX_REFCOUNT
60333 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
60334 +#else
60335 +#define MODULE_PAX_REFCOUNT ""
60336 +#endif
60337 +
60338 +#ifdef CONSTIFY_PLUGIN
60339 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60340 +#else
60341 +#define MODULE_CONSTIFY_PLUGIN ""
60342 +#endif
60343 +
60344 +#ifdef CONFIG_GRKERNSEC
60345 +#define MODULE_GRSEC "GRSEC "
60346 +#else
60347 +#define MODULE_GRSEC ""
60348 +#endif
60349 +
60350 #define VERMAGIC_STRING \
60351 UTS_RELEASE " " \
60352 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60353 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60354 - MODULE_ARCH_VERMAGIC
60355 + MODULE_ARCH_VERMAGIC \
60356 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
60357
60358 diff -urNp linux-3.0.7/include/linux/vmalloc.h linux-3.0.7/include/linux/vmalloc.h
60359 --- linux-3.0.7/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
60360 +++ linux-3.0.7/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
60361 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60362 #define VM_MAP 0x00000004 /* vmap()ed pages */
60363 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60364 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60365 +
60366 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60367 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60368 +#endif
60369 +
60370 /* bits [20..32] reserved for arch specific ioremap internals */
60371
60372 /*
60373 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60374 # endif
60375 #endif
60376
60377 +#define vmalloc(x) \
60378 +({ \
60379 + void *___retval; \
60380 + intoverflow_t ___x = (intoverflow_t)x; \
60381 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60382 + ___retval = NULL; \
60383 + else \
60384 + ___retval = vmalloc((unsigned long)___x); \
60385 + ___retval; \
60386 +})
60387 +
60388 +#define vzalloc(x) \
60389 +({ \
60390 + void *___retval; \
60391 + intoverflow_t ___x = (intoverflow_t)x; \
60392 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60393 + ___retval = NULL; \
60394 + else \
60395 + ___retval = vzalloc((unsigned long)___x); \
60396 + ___retval; \
60397 +})
60398 +
60399 +#define __vmalloc(x, y, z) \
60400 +({ \
60401 + void *___retval; \
60402 + intoverflow_t ___x = (intoverflow_t)x; \
60403 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60404 + ___retval = NULL; \
60405 + else \
60406 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60407 + ___retval; \
60408 +})
60409 +
60410 +#define vmalloc_user(x) \
60411 +({ \
60412 + void *___retval; \
60413 + intoverflow_t ___x = (intoverflow_t)x; \
60414 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60415 + ___retval = NULL; \
60416 + else \
60417 + ___retval = vmalloc_user((unsigned long)___x); \
60418 + ___retval; \
60419 +})
60420 +
60421 +#define vmalloc_exec(x) \
60422 +({ \
60423 + void *___retval; \
60424 + intoverflow_t ___x = (intoverflow_t)x; \
60425 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60426 + ___retval = NULL; \
60427 + else \
60428 + ___retval = vmalloc_exec((unsigned long)___x); \
60429 + ___retval; \
60430 +})
60431 +
60432 +#define vmalloc_node(x, y) \
60433 +({ \
60434 + void *___retval; \
60435 + intoverflow_t ___x = (intoverflow_t)x; \
60436 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60437 + ___retval = NULL; \
60438 + else \
60439 + ___retval = vmalloc_node((unsigned long)___x, (y));\
60440 + ___retval; \
60441 +})
60442 +
60443 +#define vzalloc_node(x, y) \
60444 +({ \
60445 + void *___retval; \
60446 + intoverflow_t ___x = (intoverflow_t)x; \
60447 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60448 + ___retval = NULL; \
60449 + else \
60450 + ___retval = vzalloc_node((unsigned long)___x, (y));\
60451 + ___retval; \
60452 +})
60453 +
60454 +#define vmalloc_32(x) \
60455 +({ \
60456 + void *___retval; \
60457 + intoverflow_t ___x = (intoverflow_t)x; \
60458 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60459 + ___retval = NULL; \
60460 + else \
60461 + ___retval = vmalloc_32((unsigned long)___x); \
60462 + ___retval; \
60463 +})
60464 +
60465 +#define vmalloc_32_user(x) \
60466 +({ \
60467 +void *___retval; \
60468 + intoverflow_t ___x = (intoverflow_t)x; \
60469 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60470 + ___retval = NULL; \
60471 + else \
60472 + ___retval = vmalloc_32_user((unsigned long)___x);\
60473 + ___retval; \
60474 +})
60475 +
60476 #endif /* _LINUX_VMALLOC_H */
60477 diff -urNp linux-3.0.7/include/linux/vmstat.h linux-3.0.7/include/linux/vmstat.h
60478 --- linux-3.0.7/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
60479 +++ linux-3.0.7/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
60480 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60481 /*
60482 * Zone based page accounting with per cpu differentials.
60483 */
60484 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60485 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60486
60487 static inline void zone_page_state_add(long x, struct zone *zone,
60488 enum zone_stat_item item)
60489 {
60490 - atomic_long_add(x, &zone->vm_stat[item]);
60491 - atomic_long_add(x, &vm_stat[item]);
60492 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60493 + atomic_long_add_unchecked(x, &vm_stat[item]);
60494 }
60495
60496 static inline unsigned long global_page_state(enum zone_stat_item item)
60497 {
60498 - long x = atomic_long_read(&vm_stat[item]);
60499 + long x = atomic_long_read_unchecked(&vm_stat[item]);
60500 #ifdef CONFIG_SMP
60501 if (x < 0)
60502 x = 0;
60503 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
60504 static inline unsigned long zone_page_state(struct zone *zone,
60505 enum zone_stat_item item)
60506 {
60507 - long x = atomic_long_read(&zone->vm_stat[item]);
60508 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60509 #ifdef CONFIG_SMP
60510 if (x < 0)
60511 x = 0;
60512 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60513 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60514 enum zone_stat_item item)
60515 {
60516 - long x = atomic_long_read(&zone->vm_stat[item]);
60517 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60518
60519 #ifdef CONFIG_SMP
60520 int cpu;
60521 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60522
60523 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60524 {
60525 - atomic_long_inc(&zone->vm_stat[item]);
60526 - atomic_long_inc(&vm_stat[item]);
60527 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
60528 + atomic_long_inc_unchecked(&vm_stat[item]);
60529 }
60530
60531 static inline void __inc_zone_page_state(struct page *page,
60532 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60533
60534 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60535 {
60536 - atomic_long_dec(&zone->vm_stat[item]);
60537 - atomic_long_dec(&vm_stat[item]);
60538 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
60539 + atomic_long_dec_unchecked(&vm_stat[item]);
60540 }
60541
60542 static inline void __dec_zone_page_state(struct page *page,
60543 diff -urNp linux-3.0.7/include/media/saa7146_vv.h linux-3.0.7/include/media/saa7146_vv.h
60544 --- linux-3.0.7/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
60545 +++ linux-3.0.7/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
60546 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
60547 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60548
60549 /* the extension can override this */
60550 - struct v4l2_ioctl_ops ops;
60551 + v4l2_ioctl_ops_no_const ops;
60552 /* pointer to the saa7146 core ops */
60553 const struct v4l2_ioctl_ops *core_ops;
60554
60555 diff -urNp linux-3.0.7/include/media/v4l2-dev.h linux-3.0.7/include/media/v4l2-dev.h
60556 --- linux-3.0.7/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
60557 +++ linux-3.0.7/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
60558 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
60559
60560
60561 struct v4l2_file_operations {
60562 - struct module *owner;
60563 + struct module * const owner;
60564 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60565 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60566 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60567 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
60568 int (*open) (struct file *);
60569 int (*release) (struct file *);
60570 };
60571 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
60572
60573 /*
60574 * Newer version of video_device, handled by videodev2.c
60575 diff -urNp linux-3.0.7/include/media/v4l2-ioctl.h linux-3.0.7/include/media/v4l2-ioctl.h
60576 --- linux-3.0.7/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
60577 +++ linux-3.0.7/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
60578 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
60579 long (*vidioc_default) (struct file *file, void *fh,
60580 bool valid_prio, int cmd, void *arg);
60581 };
60582 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60583
60584
60585 /* v4l debugging and diagnostics */
60586 diff -urNp linux-3.0.7/include/net/caif/cfctrl.h linux-3.0.7/include/net/caif/cfctrl.h
60587 --- linux-3.0.7/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
60588 +++ linux-3.0.7/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
60589 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
60590 void (*radioset_rsp)(void);
60591 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
60592 struct cflayer *client_layer);
60593 -};
60594 +} __no_const;
60595
60596 /* Link Setup Parameters for CAIF-Links. */
60597 struct cfctrl_link_param {
60598 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
60599 struct cfctrl {
60600 struct cfsrvl serv;
60601 struct cfctrl_rsp res;
60602 - atomic_t req_seq_no;
60603 - atomic_t rsp_seq_no;
60604 + atomic_unchecked_t req_seq_no;
60605 + atomic_unchecked_t rsp_seq_no;
60606 struct list_head list;
60607 /* Protects from simultaneous access to first_req list */
60608 spinlock_t info_list_lock;
60609 diff -urNp linux-3.0.7/include/net/flow.h linux-3.0.7/include/net/flow.h
60610 --- linux-3.0.7/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
60611 +++ linux-3.0.7/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
60612 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
60613 u8 dir, flow_resolve_t resolver, void *ctx);
60614
60615 extern void flow_cache_flush(void);
60616 -extern atomic_t flow_cache_genid;
60617 +extern atomic_unchecked_t flow_cache_genid;
60618
60619 #endif
60620 diff -urNp linux-3.0.7/include/net/inetpeer.h linux-3.0.7/include/net/inetpeer.h
60621 --- linux-3.0.7/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
60622 +++ linux-3.0.7/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
60623 @@ -43,8 +43,8 @@ struct inet_peer {
60624 */
60625 union {
60626 struct {
60627 - atomic_t rid; /* Frag reception counter */
60628 - atomic_t ip_id_count; /* IP ID for the next packet */
60629 + atomic_unchecked_t rid; /* Frag reception counter */
60630 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
60631 __u32 tcp_ts;
60632 __u32 tcp_ts_stamp;
60633 u32 metrics[RTAX_MAX];
60634 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
60635 {
60636 more++;
60637 inet_peer_refcheck(p);
60638 - return atomic_add_return(more, &p->ip_id_count) - more;
60639 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
60640 }
60641
60642 #endif /* _NET_INETPEER_H */
60643 diff -urNp linux-3.0.7/include/net/ip_fib.h linux-3.0.7/include/net/ip_fib.h
60644 --- linux-3.0.7/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
60645 +++ linux-3.0.7/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
60646 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
60647
60648 #define FIB_RES_SADDR(net, res) \
60649 ((FIB_RES_NH(res).nh_saddr_genid == \
60650 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
60651 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
60652 FIB_RES_NH(res).nh_saddr : \
60653 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
60654 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
60655 diff -urNp linux-3.0.7/include/net/ip_vs.h linux-3.0.7/include/net/ip_vs.h
60656 --- linux-3.0.7/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
60657 +++ linux-3.0.7/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
60658 @@ -509,7 +509,7 @@ struct ip_vs_conn {
60659 struct ip_vs_conn *control; /* Master control connection */
60660 atomic_t n_control; /* Number of controlled ones */
60661 struct ip_vs_dest *dest; /* real server */
60662 - atomic_t in_pkts; /* incoming packet counter */
60663 + atomic_unchecked_t in_pkts; /* incoming packet counter */
60664
60665 /* packet transmitter for different forwarding methods. If it
60666 mangles the packet, it must return NF_DROP or better NF_STOLEN,
60667 @@ -647,7 +647,7 @@ struct ip_vs_dest {
60668 __be16 port; /* port number of the server */
60669 union nf_inet_addr addr; /* IP address of the server */
60670 volatile unsigned flags; /* dest status flags */
60671 - atomic_t conn_flags; /* flags to copy to conn */
60672 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
60673 atomic_t weight; /* server weight */
60674
60675 atomic_t refcnt; /* reference counter */
60676 diff -urNp linux-3.0.7/include/net/irda/ircomm_core.h linux-3.0.7/include/net/irda/ircomm_core.h
60677 --- linux-3.0.7/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
60678 +++ linux-3.0.7/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
60679 @@ -51,7 +51,7 @@ typedef struct {
60680 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
60681 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
60682 struct ircomm_info *);
60683 -} call_t;
60684 +} __no_const call_t;
60685
60686 struct ircomm_cb {
60687 irda_queue_t queue;
60688 diff -urNp linux-3.0.7/include/net/irda/ircomm_tty.h linux-3.0.7/include/net/irda/ircomm_tty.h
60689 --- linux-3.0.7/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
60690 +++ linux-3.0.7/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
60691 @@ -35,6 +35,7 @@
60692 #include <linux/termios.h>
60693 #include <linux/timer.h>
60694 #include <linux/tty.h> /* struct tty_struct */
60695 +#include <asm/local.h>
60696
60697 #include <net/irda/irias_object.h>
60698 #include <net/irda/ircomm_core.h>
60699 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
60700 unsigned short close_delay;
60701 unsigned short closing_wait; /* time to wait before closing */
60702
60703 - int open_count;
60704 - int blocked_open; /* # of blocked opens */
60705 + local_t open_count;
60706 + local_t blocked_open; /* # of blocked opens */
60707
60708 /* Protect concurent access to :
60709 * o self->open_count
60710 diff -urNp linux-3.0.7/include/net/iucv/af_iucv.h linux-3.0.7/include/net/iucv/af_iucv.h
60711 --- linux-3.0.7/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
60712 +++ linux-3.0.7/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
60713 @@ -87,7 +87,7 @@ struct iucv_sock {
60714 struct iucv_sock_list {
60715 struct hlist_head head;
60716 rwlock_t lock;
60717 - atomic_t autobind_name;
60718 + atomic_unchecked_t autobind_name;
60719 };
60720
60721 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
60722 diff -urNp linux-3.0.7/include/net/lapb.h linux-3.0.7/include/net/lapb.h
60723 --- linux-3.0.7/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
60724 +++ linux-3.0.7/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
60725 @@ -95,7 +95,7 @@ struct lapb_cb {
60726 struct sk_buff_head write_queue;
60727 struct sk_buff_head ack_queue;
60728 unsigned char window;
60729 - struct lapb_register_struct callbacks;
60730 + struct lapb_register_struct *callbacks;
60731
60732 /* FRMR control information */
60733 struct lapb_frame frmr_data;
60734 diff -urNp linux-3.0.7/include/net/neighbour.h linux-3.0.7/include/net/neighbour.h
60735 --- linux-3.0.7/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
60736 +++ linux-3.0.7/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
60737 @@ -124,7 +124,7 @@ struct neigh_ops {
60738 int (*connected_output)(struct sk_buff*);
60739 int (*hh_output)(struct sk_buff*);
60740 int (*queue_xmit)(struct sk_buff*);
60741 -};
60742 +} __do_const;
60743
60744 struct pneigh_entry {
60745 struct pneigh_entry *next;
60746 diff -urNp linux-3.0.7/include/net/netlink.h linux-3.0.7/include/net/netlink.h
60747 --- linux-3.0.7/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
60748 +++ linux-3.0.7/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
60749 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
60750 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
60751 {
60752 if (mark)
60753 - skb_trim(skb, (unsigned char *) mark - skb->data);
60754 + skb_trim(skb, (const unsigned char *) mark - skb->data);
60755 }
60756
60757 /**
60758 diff -urNp linux-3.0.7/include/net/netns/ipv4.h linux-3.0.7/include/net/netns/ipv4.h
60759 --- linux-3.0.7/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
60760 +++ linux-3.0.7/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
60761 @@ -56,8 +56,8 @@ struct netns_ipv4 {
60762
60763 unsigned int sysctl_ping_group_range[2];
60764
60765 - atomic_t rt_genid;
60766 - atomic_t dev_addr_genid;
60767 + atomic_unchecked_t rt_genid;
60768 + atomic_unchecked_t dev_addr_genid;
60769
60770 #ifdef CONFIG_IP_MROUTE
60771 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
60772 diff -urNp linux-3.0.7/include/net/sctp/sctp.h linux-3.0.7/include/net/sctp/sctp.h
60773 --- linux-3.0.7/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
60774 +++ linux-3.0.7/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
60775 @@ -315,9 +315,9 @@ do { \
60776
60777 #else /* SCTP_DEBUG */
60778
60779 -#define SCTP_DEBUG_PRINTK(whatever...)
60780 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
60781 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
60782 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
60783 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
60784 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
60785 #define SCTP_ENABLE_DEBUG
60786 #define SCTP_DISABLE_DEBUG
60787 #define SCTP_ASSERT(expr, str, func)
60788 diff -urNp linux-3.0.7/include/net/sock.h linux-3.0.7/include/net/sock.h
60789 --- linux-3.0.7/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
60790 +++ linux-3.0.7/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
60791 @@ -277,7 +277,7 @@ struct sock {
60792 #ifdef CONFIG_RPS
60793 __u32 sk_rxhash;
60794 #endif
60795 - atomic_t sk_drops;
60796 + atomic_unchecked_t sk_drops;
60797 int sk_rcvbuf;
60798
60799 struct sk_filter __rcu *sk_filter;
60800 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
60801 }
60802
60803 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
60804 - char __user *from, char *to,
60805 + char __user *from, unsigned char *to,
60806 int copy, int offset)
60807 {
60808 if (skb->ip_summed == CHECKSUM_NONE) {
60809 diff -urNp linux-3.0.7/include/net/tcp.h linux-3.0.7/include/net/tcp.h
60810 --- linux-3.0.7/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
60811 +++ linux-3.0.7/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
60812 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
60813 struct tcp_seq_afinfo {
60814 char *name;
60815 sa_family_t family;
60816 - struct file_operations seq_fops;
60817 - struct seq_operations seq_ops;
60818 + file_operations_no_const seq_fops;
60819 + seq_operations_no_const seq_ops;
60820 };
60821
60822 struct tcp_iter_state {
60823 diff -urNp linux-3.0.7/include/net/udp.h linux-3.0.7/include/net/udp.h
60824 --- linux-3.0.7/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
60825 +++ linux-3.0.7/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
60826 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
60827 char *name;
60828 sa_family_t family;
60829 struct udp_table *udp_table;
60830 - struct file_operations seq_fops;
60831 - struct seq_operations seq_ops;
60832 + file_operations_no_const seq_fops;
60833 + seq_operations_no_const seq_ops;
60834 };
60835
60836 struct udp_iter_state {
60837 diff -urNp linux-3.0.7/include/net/xfrm.h linux-3.0.7/include/net/xfrm.h
60838 --- linux-3.0.7/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
60839 +++ linux-3.0.7/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
60840 @@ -505,7 +505,7 @@ struct xfrm_policy {
60841 struct timer_list timer;
60842
60843 struct flow_cache_object flo;
60844 - atomic_t genid;
60845 + atomic_unchecked_t genid;
60846 u32 priority;
60847 u32 index;
60848 struct xfrm_mark mark;
60849 diff -urNp linux-3.0.7/include/rdma/iw_cm.h linux-3.0.7/include/rdma/iw_cm.h
60850 --- linux-3.0.7/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
60851 +++ linux-3.0.7/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
60852 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
60853 int backlog);
60854
60855 int (*destroy_listen)(struct iw_cm_id *cm_id);
60856 -};
60857 +} __no_const;
60858
60859 /**
60860 * iw_create_cm_id - Create an IW CM identifier.
60861 diff -urNp linux-3.0.7/include/scsi/libfc.h linux-3.0.7/include/scsi/libfc.h
60862 --- linux-3.0.7/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
60863 +++ linux-3.0.7/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
60864 @@ -750,6 +750,7 @@ struct libfc_function_template {
60865 */
60866 void (*disc_stop_final) (struct fc_lport *);
60867 };
60868 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
60869
60870 /**
60871 * struct fc_disc - Discovery context
60872 @@ -853,7 +854,7 @@ struct fc_lport {
60873 struct fc_vport *vport;
60874
60875 /* Operational Information */
60876 - struct libfc_function_template tt;
60877 + libfc_function_template_no_const tt;
60878 u8 link_up;
60879 u8 qfull;
60880 enum fc_lport_state state;
60881 diff -urNp linux-3.0.7/include/scsi/scsi_device.h linux-3.0.7/include/scsi/scsi_device.h
60882 --- linux-3.0.7/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
60883 +++ linux-3.0.7/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
60884 @@ -161,9 +161,9 @@ struct scsi_device {
60885 unsigned int max_device_blocked; /* what device_blocked counts down from */
60886 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
60887
60888 - atomic_t iorequest_cnt;
60889 - atomic_t iodone_cnt;
60890 - atomic_t ioerr_cnt;
60891 + atomic_unchecked_t iorequest_cnt;
60892 + atomic_unchecked_t iodone_cnt;
60893 + atomic_unchecked_t ioerr_cnt;
60894
60895 struct device sdev_gendev,
60896 sdev_dev;
60897 diff -urNp linux-3.0.7/include/scsi/scsi_transport_fc.h linux-3.0.7/include/scsi/scsi_transport_fc.h
60898 --- linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
60899 +++ linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
60900 @@ -711,7 +711,7 @@ struct fc_function_template {
60901 unsigned long show_host_system_hostname:1;
60902
60903 unsigned long disable_target_scan:1;
60904 -};
60905 +} __do_const;
60906
60907
60908 /**
60909 diff -urNp linux-3.0.7/include/sound/ak4xxx-adda.h linux-3.0.7/include/sound/ak4xxx-adda.h
60910 --- linux-3.0.7/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
60911 +++ linux-3.0.7/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
60912 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
60913 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
60914 unsigned char val);
60915 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
60916 -};
60917 +} __no_const;
60918
60919 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
60920
60921 diff -urNp linux-3.0.7/include/sound/hwdep.h linux-3.0.7/include/sound/hwdep.h
60922 --- linux-3.0.7/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
60923 +++ linux-3.0.7/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
60924 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
60925 struct snd_hwdep_dsp_status *status);
60926 int (*dsp_load)(struct snd_hwdep *hw,
60927 struct snd_hwdep_dsp_image *image);
60928 -};
60929 +} __no_const;
60930
60931 struct snd_hwdep {
60932 struct snd_card *card;
60933 diff -urNp linux-3.0.7/include/sound/info.h linux-3.0.7/include/sound/info.h
60934 --- linux-3.0.7/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
60935 +++ linux-3.0.7/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
60936 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
60937 struct snd_info_buffer *buffer);
60938 void (*write)(struct snd_info_entry *entry,
60939 struct snd_info_buffer *buffer);
60940 -};
60941 +} __no_const;
60942
60943 struct snd_info_entry_ops {
60944 int (*open)(struct snd_info_entry *entry,
60945 diff -urNp linux-3.0.7/include/sound/pcm.h linux-3.0.7/include/sound/pcm.h
60946 --- linux-3.0.7/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
60947 +++ linux-3.0.7/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
60948 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
60949 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
60950 int (*ack)(struct snd_pcm_substream *substream);
60951 };
60952 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
60953
60954 /*
60955 *
60956 diff -urNp linux-3.0.7/include/sound/sb16_csp.h linux-3.0.7/include/sound/sb16_csp.h
60957 --- linux-3.0.7/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
60958 +++ linux-3.0.7/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
60959 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
60960 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
60961 int (*csp_stop) (struct snd_sb_csp * p);
60962 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
60963 -};
60964 +} __no_const;
60965
60966 /*
60967 * CSP private data
60968 diff -urNp linux-3.0.7/include/sound/soc.h linux-3.0.7/include/sound/soc.h
60969 --- linux-3.0.7/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
60970 +++ linux-3.0.7/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
60971 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
60972
60973 /* platform stream ops */
60974 struct snd_pcm_ops *ops;
60975 -};
60976 +} __do_const;
60977
60978 struct snd_soc_platform {
60979 const char *name;
60980 diff -urNp linux-3.0.7/include/sound/ymfpci.h linux-3.0.7/include/sound/ymfpci.h
60981 --- linux-3.0.7/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
60982 +++ linux-3.0.7/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
60983 @@ -358,7 +358,7 @@ struct snd_ymfpci {
60984 spinlock_t reg_lock;
60985 spinlock_t voice_lock;
60986 wait_queue_head_t interrupt_sleep;
60987 - atomic_t interrupt_sleep_count;
60988 + atomic_unchecked_t interrupt_sleep_count;
60989 struct snd_info_entry *proc_entry;
60990 const struct firmware *dsp_microcode;
60991 const struct firmware *controller_microcode;
60992 diff -urNp linux-3.0.7/include/target/target_core_base.h linux-3.0.7/include/target/target_core_base.h
60993 --- linux-3.0.7/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
60994 +++ linux-3.0.7/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
60995 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
60996 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
60997 int (*t10_pr_register)(struct se_cmd *);
60998 int (*t10_pr_clear)(struct se_cmd *);
60999 -};
61000 +} __no_const;
61001
61002 struct t10_reservation_template {
61003 /* Reservation effects all target ports */
61004 @@ -432,8 +432,8 @@ struct se_transport_task {
61005 atomic_t t_task_cdbs_left;
61006 atomic_t t_task_cdbs_ex_left;
61007 atomic_t t_task_cdbs_timeout_left;
61008 - atomic_t t_task_cdbs_sent;
61009 - atomic_t t_transport_aborted;
61010 + atomic_unchecked_t t_task_cdbs_sent;
61011 + atomic_unchecked_t t_transport_aborted;
61012 atomic_t t_transport_active;
61013 atomic_t t_transport_complete;
61014 atomic_t t_transport_queue_active;
61015 @@ -774,7 +774,7 @@ struct se_device {
61016 atomic_t active_cmds;
61017 atomic_t simple_cmds;
61018 atomic_t depth_left;
61019 - atomic_t dev_ordered_id;
61020 + atomic_unchecked_t dev_ordered_id;
61021 atomic_t dev_tur_active;
61022 atomic_t execute_tasks;
61023 atomic_t dev_status_thr_count;
61024 diff -urNp linux-3.0.7/include/trace/events/irq.h linux-3.0.7/include/trace/events/irq.h
61025 --- linux-3.0.7/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
61026 +++ linux-3.0.7/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
61027 @@ -36,7 +36,7 @@ struct softirq_action;
61028 */
61029 TRACE_EVENT(irq_handler_entry,
61030
61031 - TP_PROTO(int irq, struct irqaction *action),
61032 + TP_PROTO(int irq, const struct irqaction *action),
61033
61034 TP_ARGS(irq, action),
61035
61036 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61037 */
61038 TRACE_EVENT(irq_handler_exit,
61039
61040 - TP_PROTO(int irq, struct irqaction *action, int ret),
61041 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61042
61043 TP_ARGS(irq, action, ret),
61044
61045 diff -urNp linux-3.0.7/include/video/udlfb.h linux-3.0.7/include/video/udlfb.h
61046 --- linux-3.0.7/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
61047 +++ linux-3.0.7/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
61048 @@ -51,10 +51,10 @@ struct dlfb_data {
61049 int base8;
61050 u32 pseudo_palette[256];
61051 /* blit-only rendering path metrics, exposed through sysfs */
61052 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61053 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61054 - atomic_t bytes_sent; /* to usb, after compression including overhead */
61055 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61056 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61057 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61058 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61059 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61060 };
61061
61062 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61063 diff -urNp linux-3.0.7/include/video/uvesafb.h linux-3.0.7/include/video/uvesafb.h
61064 --- linux-3.0.7/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
61065 +++ linux-3.0.7/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
61066 @@ -177,6 +177,7 @@ struct uvesafb_par {
61067 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61068 u8 pmi_setpal; /* PMI for palette changes */
61069 u16 *pmi_base; /* protected mode interface location */
61070 + u8 *pmi_code; /* protected mode code location */
61071 void *pmi_start;
61072 void *pmi_pal;
61073 u8 *vbe_state_orig; /*
61074 diff -urNp linux-3.0.7/init/do_mounts.c linux-3.0.7/init/do_mounts.c
61075 --- linux-3.0.7/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
61076 +++ linux-3.0.7/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
61077 @@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61078
61079 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61080 {
61081 - int err = sys_mount(name, "/root", fs, flags, data);
61082 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61083 if (err)
61084 return err;
61085
61086 - sys_chdir((const char __user __force *)"/root");
61087 + sys_chdir((const char __force_user*)"/root");
61088 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61089 printk(KERN_INFO
61090 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61091 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61092 va_start(args, fmt);
61093 vsprintf(buf, fmt, args);
61094 va_end(args);
61095 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61096 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61097 if (fd >= 0) {
61098 sys_ioctl(fd, FDEJECT, 0);
61099 sys_close(fd);
61100 }
61101 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61102 - fd = sys_open("/dev/console", O_RDWR, 0);
61103 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61104 if (fd >= 0) {
61105 sys_ioctl(fd, TCGETS, (long)&termios);
61106 termios.c_lflag &= ~ICANON;
61107 sys_ioctl(fd, TCSETSF, (long)&termios);
61108 - sys_read(fd, &c, 1);
61109 + sys_read(fd, (char __user *)&c, 1);
61110 termios.c_lflag |= ICANON;
61111 sys_ioctl(fd, TCSETSF, (long)&termios);
61112 sys_close(fd);
61113 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61114 mount_root();
61115 out:
61116 devtmpfs_mount("dev");
61117 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61118 - sys_chroot((const char __user __force *)".");
61119 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61120 + sys_chroot((const char __force_user *)".");
61121 }
61122 diff -urNp linux-3.0.7/init/do_mounts.h linux-3.0.7/init/do_mounts.h
61123 --- linux-3.0.7/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
61124 +++ linux-3.0.7/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
61125 @@ -15,15 +15,15 @@ extern int root_mountflags;
61126
61127 static inline int create_dev(char *name, dev_t dev)
61128 {
61129 - sys_unlink(name);
61130 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61131 + sys_unlink((char __force_user *)name);
61132 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61133 }
61134
61135 #if BITS_PER_LONG == 32
61136 static inline u32 bstat(char *name)
61137 {
61138 struct stat64 stat;
61139 - if (sys_stat64(name, &stat) != 0)
61140 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61141 return 0;
61142 if (!S_ISBLK(stat.st_mode))
61143 return 0;
61144 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61145 static inline u32 bstat(char *name)
61146 {
61147 struct stat stat;
61148 - if (sys_newstat(name, &stat) != 0)
61149 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61150 return 0;
61151 if (!S_ISBLK(stat.st_mode))
61152 return 0;
61153 diff -urNp linux-3.0.7/init/do_mounts_initrd.c linux-3.0.7/init/do_mounts_initrd.c
61154 --- linux-3.0.7/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
61155 +++ linux-3.0.7/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
61156 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61157 create_dev("/dev/root.old", Root_RAM0);
61158 /* mount initrd on rootfs' /root */
61159 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61160 - sys_mkdir("/old", 0700);
61161 - root_fd = sys_open("/", 0, 0);
61162 - old_fd = sys_open("/old", 0, 0);
61163 + sys_mkdir((const char __force_user *)"/old", 0700);
61164 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
61165 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61166 /* move initrd over / and chdir/chroot in initrd root */
61167 - sys_chdir("/root");
61168 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61169 - sys_chroot(".");
61170 + sys_chdir((const char __force_user *)"/root");
61171 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61172 + sys_chroot((const char __force_user *)".");
61173
61174 /*
61175 * In case that a resume from disk is carried out by linuxrc or one of
61176 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61177
61178 /* move initrd to rootfs' /old */
61179 sys_fchdir(old_fd);
61180 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61181 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61182 /* switch root and cwd back to / of rootfs */
61183 sys_fchdir(root_fd);
61184 - sys_chroot(".");
61185 + sys_chroot((const char __force_user *)".");
61186 sys_close(old_fd);
61187 sys_close(root_fd);
61188
61189 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61190 - sys_chdir("/old");
61191 + sys_chdir((const char __force_user *)"/old");
61192 return;
61193 }
61194
61195 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61196 mount_root();
61197
61198 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61199 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61200 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61201 if (!error)
61202 printk("okay\n");
61203 else {
61204 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61205 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61206 if (error == -ENOENT)
61207 printk("/initrd does not exist. Ignored.\n");
61208 else
61209 printk("failed\n");
61210 printk(KERN_NOTICE "Unmounting old root\n");
61211 - sys_umount("/old", MNT_DETACH);
61212 + sys_umount((char __force_user *)"/old", MNT_DETACH);
61213 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61214 if (fd < 0) {
61215 error = fd;
61216 @@ -116,11 +116,11 @@ int __init initrd_load(void)
61217 * mounted in the normal path.
61218 */
61219 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61220 - sys_unlink("/initrd.image");
61221 + sys_unlink((const char __force_user *)"/initrd.image");
61222 handle_initrd();
61223 return 1;
61224 }
61225 }
61226 - sys_unlink("/initrd.image");
61227 + sys_unlink((const char __force_user *)"/initrd.image");
61228 return 0;
61229 }
61230 diff -urNp linux-3.0.7/init/do_mounts_md.c linux-3.0.7/init/do_mounts_md.c
61231 --- linux-3.0.7/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
61232 +++ linux-3.0.7/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
61233 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61234 partitioned ? "_d" : "", minor,
61235 md_setup_args[ent].device_names);
61236
61237 - fd = sys_open(name, 0, 0);
61238 + fd = sys_open((char __force_user *)name, 0, 0);
61239 if (fd < 0) {
61240 printk(KERN_ERR "md: open failed - cannot start "
61241 "array %s\n", name);
61242 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61243 * array without it
61244 */
61245 sys_close(fd);
61246 - fd = sys_open(name, 0, 0);
61247 + fd = sys_open((char __force_user *)name, 0, 0);
61248 sys_ioctl(fd, BLKRRPART, 0);
61249 }
61250 sys_close(fd);
61251 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61252
61253 wait_for_device_probe();
61254
61255 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61256 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61257 if (fd >= 0) {
61258 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61259 sys_close(fd);
61260 diff -urNp linux-3.0.7/init/initramfs.c linux-3.0.7/init/initramfs.c
61261 --- linux-3.0.7/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
61262 +++ linux-3.0.7/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
61263 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61264 }
61265 }
61266
61267 -static long __init do_utime(char __user *filename, time_t mtime)
61268 +static long __init do_utime(__force char __user *filename, time_t mtime)
61269 {
61270 struct timespec t[2];
61271
61272 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61273 struct dir_entry *de, *tmp;
61274 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61275 list_del(&de->list);
61276 - do_utime(de->name, de->mtime);
61277 + do_utime((char __force_user *)de->name, de->mtime);
61278 kfree(de->name);
61279 kfree(de);
61280 }
61281 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61282 if (nlink >= 2) {
61283 char *old = find_link(major, minor, ino, mode, collected);
61284 if (old)
61285 - return (sys_link(old, collected) < 0) ? -1 : 1;
61286 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61287 }
61288 return 0;
61289 }
61290 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61291 {
61292 struct stat st;
61293
61294 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61295 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61296 if (S_ISDIR(st.st_mode))
61297 - sys_rmdir(path);
61298 + sys_rmdir((char __force_user *)path);
61299 else
61300 - sys_unlink(path);
61301 + sys_unlink((char __force_user *)path);
61302 }
61303 }
61304
61305 @@ -305,7 +305,7 @@ static int __init do_name(void)
61306 int openflags = O_WRONLY|O_CREAT;
61307 if (ml != 1)
61308 openflags |= O_TRUNC;
61309 - wfd = sys_open(collected, openflags, mode);
61310 + wfd = sys_open((char __force_user *)collected, openflags, mode);
61311
61312 if (wfd >= 0) {
61313 sys_fchown(wfd, uid, gid);
61314 @@ -317,17 +317,17 @@ static int __init do_name(void)
61315 }
61316 }
61317 } else if (S_ISDIR(mode)) {
61318 - sys_mkdir(collected, mode);
61319 - sys_chown(collected, uid, gid);
61320 - sys_chmod(collected, mode);
61321 + sys_mkdir((char __force_user *)collected, mode);
61322 + sys_chown((char __force_user *)collected, uid, gid);
61323 + sys_chmod((char __force_user *)collected, mode);
61324 dir_add(collected, mtime);
61325 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61326 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61327 if (maybe_link() == 0) {
61328 - sys_mknod(collected, mode, rdev);
61329 - sys_chown(collected, uid, gid);
61330 - sys_chmod(collected, mode);
61331 - do_utime(collected, mtime);
61332 + sys_mknod((char __force_user *)collected, mode, rdev);
61333 + sys_chown((char __force_user *)collected, uid, gid);
61334 + sys_chmod((char __force_user *)collected, mode);
61335 + do_utime((char __force_user *)collected, mtime);
61336 }
61337 }
61338 return 0;
61339 @@ -336,15 +336,15 @@ static int __init do_name(void)
61340 static int __init do_copy(void)
61341 {
61342 if (count >= body_len) {
61343 - sys_write(wfd, victim, body_len);
61344 + sys_write(wfd, (char __force_user *)victim, body_len);
61345 sys_close(wfd);
61346 - do_utime(vcollected, mtime);
61347 + do_utime((char __force_user *)vcollected, mtime);
61348 kfree(vcollected);
61349 eat(body_len);
61350 state = SkipIt;
61351 return 0;
61352 } else {
61353 - sys_write(wfd, victim, count);
61354 + sys_write(wfd, (char __force_user *)victim, count);
61355 body_len -= count;
61356 eat(count);
61357 return 1;
61358 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
61359 {
61360 collected[N_ALIGN(name_len) + body_len] = '\0';
61361 clean_path(collected, 0);
61362 - sys_symlink(collected + N_ALIGN(name_len), collected);
61363 - sys_lchown(collected, uid, gid);
61364 - do_utime(collected, mtime);
61365 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61366 + sys_lchown((char __force_user *)collected, uid, gid);
61367 + do_utime((char __force_user *)collected, mtime);
61368 state = SkipIt;
61369 next_state = Reset;
61370 return 0;
61371 diff -urNp linux-3.0.7/init/Kconfig linux-3.0.7/init/Kconfig
61372 --- linux-3.0.7/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
61373 +++ linux-3.0.7/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
61374 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
61375
61376 config COMPAT_BRK
61377 bool "Disable heap randomization"
61378 - default y
61379 + default n
61380 help
61381 Randomizing heap placement makes heap exploits harder, but it
61382 also breaks ancient binaries (including anything libc5 based).
61383 diff -urNp linux-3.0.7/init/main.c linux-3.0.7/init/main.c
61384 --- linux-3.0.7/init/main.c 2011-07-21 22:17:23.000000000 -0400
61385 +++ linux-3.0.7/init/main.c 2011-10-06 04:17:55.000000000 -0400
61386 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61387 extern void tc_init(void);
61388 #endif
61389
61390 +extern void grsecurity_init(void);
61391 +
61392 /*
61393 * Debug helper: via this flag we know that we are in 'early bootup code'
61394 * where only the boot processor is running with IRQ disabled. This means
61395 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61396
61397 __setup("reset_devices", set_reset_devices);
61398
61399 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61400 +extern char pax_enter_kernel_user[];
61401 +extern char pax_exit_kernel_user[];
61402 +extern pgdval_t clone_pgd_mask;
61403 +#endif
61404 +
61405 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61406 +static int __init setup_pax_nouderef(char *str)
61407 +{
61408 +#ifdef CONFIG_X86_32
61409 + unsigned int cpu;
61410 + struct desc_struct *gdt;
61411 +
61412 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
61413 + gdt = get_cpu_gdt_table(cpu);
61414 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61415 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61416 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61417 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61418 + }
61419 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61420 +#else
61421 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61422 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61423 + clone_pgd_mask = ~(pgdval_t)0UL;
61424 +#endif
61425 +
61426 + return 0;
61427 +}
61428 +early_param("pax_nouderef", setup_pax_nouderef);
61429 +#endif
61430 +
61431 +#ifdef CONFIG_PAX_SOFTMODE
61432 +int pax_softmode;
61433 +
61434 +static int __init setup_pax_softmode(char *str)
61435 +{
61436 + get_option(&str, &pax_softmode);
61437 + return 1;
61438 +}
61439 +__setup("pax_softmode=", setup_pax_softmode);
61440 +#endif
61441 +
61442 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61443 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61444 static const char *panic_later, *panic_param;
61445 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
61446 {
61447 int count = preempt_count();
61448 int ret;
61449 + const char *msg1 = "", *msg2 = "";
61450
61451 if (initcall_debug)
61452 ret = do_one_initcall_debug(fn);
61453 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
61454 sprintf(msgbuf, "error code %d ", ret);
61455
61456 if (preempt_count() != count) {
61457 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61458 + msg1 = " preemption imbalance";
61459 preempt_count() = count;
61460 }
61461 if (irqs_disabled()) {
61462 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61463 + msg2 = " disabled interrupts";
61464 local_irq_enable();
61465 }
61466 - if (msgbuf[0]) {
61467 - printk("initcall %pF returned with %s\n", fn, msgbuf);
61468 + if (msgbuf[0] || *msg1 || *msg2) {
61469 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61470 }
61471
61472 return ret;
61473 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
61474 do_basic_setup();
61475
61476 /* Open the /dev/console on the rootfs, this should never fail */
61477 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61478 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61479 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61480
61481 (void) sys_dup(0);
61482 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
61483 if (!ramdisk_execute_command)
61484 ramdisk_execute_command = "/init";
61485
61486 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61487 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61488 ramdisk_execute_command = NULL;
61489 prepare_namespace();
61490 }
61491
61492 + grsecurity_init();
61493 +
61494 /*
61495 * Ok, we have completed the initial bootup, and
61496 * we're essentially up and running. Get rid of the
61497 diff -urNp linux-3.0.7/ipc/mqueue.c linux-3.0.7/ipc/mqueue.c
61498 --- linux-3.0.7/ipc/mqueue.c 2011-10-16 21:54:54.000000000 -0400
61499 +++ linux-3.0.7/ipc/mqueue.c 2011-10-16 21:59:31.000000000 -0400
61500 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61501 mq_bytes = (mq_msg_tblsz +
61502 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61503
61504 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61505 spin_lock(&mq_lock);
61506 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61507 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61508 diff -urNp linux-3.0.7/ipc/msg.c linux-3.0.7/ipc/msg.c
61509 --- linux-3.0.7/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
61510 +++ linux-3.0.7/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
61511 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61512 return security_msg_queue_associate(msq, msgflg);
61513 }
61514
61515 +static struct ipc_ops msg_ops = {
61516 + .getnew = newque,
61517 + .associate = msg_security,
61518 + .more_checks = NULL
61519 +};
61520 +
61521 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61522 {
61523 struct ipc_namespace *ns;
61524 - struct ipc_ops msg_ops;
61525 struct ipc_params msg_params;
61526
61527 ns = current->nsproxy->ipc_ns;
61528
61529 - msg_ops.getnew = newque;
61530 - msg_ops.associate = msg_security;
61531 - msg_ops.more_checks = NULL;
61532 -
61533 msg_params.key = key;
61534 msg_params.flg = msgflg;
61535
61536 diff -urNp linux-3.0.7/ipc/sem.c linux-3.0.7/ipc/sem.c
61537 --- linux-3.0.7/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
61538 +++ linux-3.0.7/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
61539 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
61540 return 0;
61541 }
61542
61543 +static struct ipc_ops sem_ops = {
61544 + .getnew = newary,
61545 + .associate = sem_security,
61546 + .more_checks = sem_more_checks
61547 +};
61548 +
61549 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61550 {
61551 struct ipc_namespace *ns;
61552 - struct ipc_ops sem_ops;
61553 struct ipc_params sem_params;
61554
61555 ns = current->nsproxy->ipc_ns;
61556 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61557 if (nsems < 0 || nsems > ns->sc_semmsl)
61558 return -EINVAL;
61559
61560 - sem_ops.getnew = newary;
61561 - sem_ops.associate = sem_security;
61562 - sem_ops.more_checks = sem_more_checks;
61563 -
61564 sem_params.key = key;
61565 sem_params.flg = semflg;
61566 sem_params.u.nsems = nsems;
61567 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
61568 int nsems;
61569 struct list_head tasks;
61570
61571 + pax_track_stack();
61572 +
61573 sma = sem_lock_check(ns, semid);
61574 if (IS_ERR(sma))
61575 return PTR_ERR(sma);
61576 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61577 struct ipc_namespace *ns;
61578 struct list_head tasks;
61579
61580 + pax_track_stack();
61581 +
61582 ns = current->nsproxy->ipc_ns;
61583
61584 if (nsops < 1 || semid < 0)
61585 diff -urNp linux-3.0.7/ipc/shm.c linux-3.0.7/ipc/shm.c
61586 --- linux-3.0.7/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
61587 +++ linux-3.0.7/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
61588 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
61589 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
61590 #endif
61591
61592 +#ifdef CONFIG_GRKERNSEC
61593 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61594 + const time_t shm_createtime, const uid_t cuid,
61595 + const int shmid);
61596 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61597 + const time_t shm_createtime);
61598 +#endif
61599 +
61600 void shm_init_ns(struct ipc_namespace *ns)
61601 {
61602 ns->shm_ctlmax = SHMMAX;
61603 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
61604 shp->shm_lprid = 0;
61605 shp->shm_atim = shp->shm_dtim = 0;
61606 shp->shm_ctim = get_seconds();
61607 +#ifdef CONFIG_GRKERNSEC
61608 + {
61609 + struct timespec timeval;
61610 + do_posix_clock_monotonic_gettime(&timeval);
61611 +
61612 + shp->shm_createtime = timeval.tv_sec;
61613 + }
61614 +#endif
61615 shp->shm_segsz = size;
61616 shp->shm_nattch = 0;
61617 shp->shm_file = file;
61618 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
61619 return 0;
61620 }
61621
61622 +static struct ipc_ops shm_ops = {
61623 + .getnew = newseg,
61624 + .associate = shm_security,
61625 + .more_checks = shm_more_checks
61626 +};
61627 +
61628 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
61629 {
61630 struct ipc_namespace *ns;
61631 - struct ipc_ops shm_ops;
61632 struct ipc_params shm_params;
61633
61634 ns = current->nsproxy->ipc_ns;
61635
61636 - shm_ops.getnew = newseg;
61637 - shm_ops.associate = shm_security;
61638 - shm_ops.more_checks = shm_more_checks;
61639 -
61640 shm_params.key = key;
61641 shm_params.flg = shmflg;
61642 shm_params.u.size = size;
61643 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
61644 case SHM_LOCK:
61645 case SHM_UNLOCK:
61646 {
61647 - struct file *uninitialized_var(shm_file);
61648 -
61649 lru_add_drain_all(); /* drain pagevecs to lru lists */
61650
61651 shp = shm_lock_check(ns, shmid);
61652 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
61653 if (err)
61654 goto out_unlock;
61655
61656 +#ifdef CONFIG_GRKERNSEC
61657 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
61658 + shp->shm_perm.cuid, shmid) ||
61659 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
61660 + err = -EACCES;
61661 + goto out_unlock;
61662 + }
61663 +#endif
61664 +
61665 path = shp->shm_file->f_path;
61666 path_get(&path);
61667 shp->shm_nattch++;
61668 +#ifdef CONFIG_GRKERNSEC
61669 + shp->shm_lapid = current->pid;
61670 +#endif
61671 size = i_size_read(path.dentry->d_inode);
61672 shm_unlock(shp);
61673
61674 diff -urNp linux-3.0.7/kernel/acct.c linux-3.0.7/kernel/acct.c
61675 --- linux-3.0.7/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
61676 +++ linux-3.0.7/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
61677 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
61678 */
61679 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
61680 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
61681 - file->f_op->write(file, (char *)&ac,
61682 + file->f_op->write(file, (char __force_user *)&ac,
61683 sizeof(acct_t), &file->f_pos);
61684 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
61685 set_fs(fs);
61686 diff -urNp linux-3.0.7/kernel/audit.c linux-3.0.7/kernel/audit.c
61687 --- linux-3.0.7/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
61688 +++ linux-3.0.7/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
61689 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
61690 3) suppressed due to audit_rate_limit
61691 4) suppressed due to audit_backlog_limit
61692 */
61693 -static atomic_t audit_lost = ATOMIC_INIT(0);
61694 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
61695
61696 /* The netlink socket. */
61697 static struct sock *audit_sock;
61698 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
61699 unsigned long now;
61700 int print;
61701
61702 - atomic_inc(&audit_lost);
61703 + atomic_inc_unchecked(&audit_lost);
61704
61705 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
61706
61707 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
61708 printk(KERN_WARNING
61709 "audit: audit_lost=%d audit_rate_limit=%d "
61710 "audit_backlog_limit=%d\n",
61711 - atomic_read(&audit_lost),
61712 + atomic_read_unchecked(&audit_lost),
61713 audit_rate_limit,
61714 audit_backlog_limit);
61715 audit_panic(message);
61716 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
61717 status_set.pid = audit_pid;
61718 status_set.rate_limit = audit_rate_limit;
61719 status_set.backlog_limit = audit_backlog_limit;
61720 - status_set.lost = atomic_read(&audit_lost);
61721 + status_set.lost = atomic_read_unchecked(&audit_lost);
61722 status_set.backlog = skb_queue_len(&audit_skb_queue);
61723 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
61724 &status_set, sizeof(status_set));
61725 diff -urNp linux-3.0.7/kernel/auditsc.c linux-3.0.7/kernel/auditsc.c
61726 --- linux-3.0.7/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
61727 +++ linux-3.0.7/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
61728 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
61729 }
61730
61731 /* global counter which is incremented every time something logs in */
61732 -static atomic_t session_id = ATOMIC_INIT(0);
61733 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
61734
61735 /**
61736 * audit_set_loginuid - set a task's audit_context loginuid
61737 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
61738 */
61739 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
61740 {
61741 - unsigned int sessionid = atomic_inc_return(&session_id);
61742 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
61743 struct audit_context *context = task->audit_context;
61744
61745 if (context && context->in_syscall) {
61746 diff -urNp linux-3.0.7/kernel/capability.c linux-3.0.7/kernel/capability.c
61747 --- linux-3.0.7/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
61748 +++ linux-3.0.7/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
61749 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
61750 * before modification is attempted and the application
61751 * fails.
61752 */
61753 + if (tocopy > ARRAY_SIZE(kdata))
61754 + return -EFAULT;
61755 +
61756 if (copy_to_user(dataptr, kdata, tocopy
61757 * sizeof(struct __user_cap_data_struct))) {
61758 return -EFAULT;
61759 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
61760 BUG();
61761 }
61762
61763 - if (security_capable(ns, current_cred(), cap) == 0) {
61764 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
61765 current->flags |= PF_SUPERPRIV;
61766 return true;
61767 }
61768 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
61769 }
61770 EXPORT_SYMBOL(ns_capable);
61771
61772 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
61773 +{
61774 + if (unlikely(!cap_valid(cap))) {
61775 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
61776 + BUG();
61777 + }
61778 +
61779 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
61780 + current->flags |= PF_SUPERPRIV;
61781 + return true;
61782 + }
61783 + return false;
61784 +}
61785 +EXPORT_SYMBOL(ns_capable_nolog);
61786 +
61787 +bool capable_nolog(int cap)
61788 +{
61789 + return ns_capable_nolog(&init_user_ns, cap);
61790 +}
61791 +EXPORT_SYMBOL(capable_nolog);
61792 +
61793 /**
61794 * task_ns_capable - Determine whether current task has a superior
61795 * capability targeted at a specific task's user namespace.
61796 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
61797 }
61798 EXPORT_SYMBOL(task_ns_capable);
61799
61800 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
61801 +{
61802 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
61803 +}
61804 +EXPORT_SYMBOL(task_ns_capable_nolog);
61805 +
61806 /**
61807 * nsown_capable - Check superior capability to one's own user_ns
61808 * @cap: The capability in question
61809 diff -urNp linux-3.0.7/kernel/cgroup.c linux-3.0.7/kernel/cgroup.c
61810 --- linux-3.0.7/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
61811 +++ linux-3.0.7/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
61812 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
61813 struct hlist_head *hhead;
61814 struct cg_cgroup_link *link;
61815
61816 + pax_track_stack();
61817 +
61818 /* First see if we already have a cgroup group that matches
61819 * the desired set */
61820 read_lock(&css_set_lock);
61821 diff -urNp linux-3.0.7/kernel/compat.c linux-3.0.7/kernel/compat.c
61822 --- linux-3.0.7/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
61823 +++ linux-3.0.7/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
61824 @@ -13,6 +13,7 @@
61825
61826 #include <linux/linkage.h>
61827 #include <linux/compat.h>
61828 +#include <linux/module.h>
61829 #include <linux/errno.h>
61830 #include <linux/time.h>
61831 #include <linux/signal.h>
61832 @@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
61833 mm_segment_t oldfs;
61834 long ret;
61835
61836 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
61837 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
61838 oldfs = get_fs();
61839 set_fs(KERNEL_DS);
61840 ret = hrtimer_nanosleep_restart(restart);
61841 @@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
61842 oldfs = get_fs();
61843 set_fs(KERNEL_DS);
61844 ret = hrtimer_nanosleep(&tu,
61845 - rmtp ? (struct timespec __user *)&rmt : NULL,
61846 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
61847 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
61848 set_fs(oldfs);
61849
61850 @@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
61851 mm_segment_t old_fs = get_fs();
61852
61853 set_fs(KERNEL_DS);
61854 - ret = sys_sigpending((old_sigset_t __user *) &s);
61855 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
61856 set_fs(old_fs);
61857 if (ret == 0)
61858 ret = put_user(s, set);
61859 @@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
61860 old_fs = get_fs();
61861 set_fs(KERNEL_DS);
61862 ret = sys_sigprocmask(how,
61863 - set ? (old_sigset_t __user *) &s : NULL,
61864 - oset ? (old_sigset_t __user *) &s : NULL);
61865 + set ? (old_sigset_t __force_user *) &s : NULL,
61866 + oset ? (old_sigset_t __force_user *) &s : NULL);
61867 set_fs(old_fs);
61868 if (ret == 0)
61869 if (oset)
61870 @@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
61871 mm_segment_t old_fs = get_fs();
61872
61873 set_fs(KERNEL_DS);
61874 - ret = sys_old_getrlimit(resource, &r);
61875 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
61876 set_fs(old_fs);
61877
61878 if (!ret) {
61879 @@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
61880 mm_segment_t old_fs = get_fs();
61881
61882 set_fs(KERNEL_DS);
61883 - ret = sys_getrusage(who, (struct rusage __user *) &r);
61884 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
61885 set_fs(old_fs);
61886
61887 if (ret)
61888 @@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
61889 set_fs (KERNEL_DS);
61890 ret = sys_wait4(pid,
61891 (stat_addr ?
61892 - (unsigned int __user *) &status : NULL),
61893 - options, (struct rusage __user *) &r);
61894 + (unsigned int __force_user *) &status : NULL),
61895 + options, (struct rusage __force_user *) &r);
61896 set_fs (old_fs);
61897
61898 if (ret > 0) {
61899 @@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
61900 memset(&info, 0, sizeof(info));
61901
61902 set_fs(KERNEL_DS);
61903 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
61904 - uru ? (struct rusage __user *)&ru : NULL);
61905 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
61906 + uru ? (struct rusage __force_user *)&ru : NULL);
61907 set_fs(old_fs);
61908
61909 if ((ret < 0) || (info.si_signo == 0))
61910 @@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
61911 oldfs = get_fs();
61912 set_fs(KERNEL_DS);
61913 err = sys_timer_settime(timer_id, flags,
61914 - (struct itimerspec __user *) &newts,
61915 - (struct itimerspec __user *) &oldts);
61916 + (struct itimerspec __force_user *) &newts,
61917 + (struct itimerspec __force_user *) &oldts);
61918 set_fs(oldfs);
61919 if (!err && old && put_compat_itimerspec(old, &oldts))
61920 return -EFAULT;
61921 @@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
61922 oldfs = get_fs();
61923 set_fs(KERNEL_DS);
61924 err = sys_timer_gettime(timer_id,
61925 - (struct itimerspec __user *) &ts);
61926 + (struct itimerspec __force_user *) &ts);
61927 set_fs(oldfs);
61928 if (!err && put_compat_itimerspec(setting, &ts))
61929 return -EFAULT;
61930 @@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
61931 oldfs = get_fs();
61932 set_fs(KERNEL_DS);
61933 err = sys_clock_settime(which_clock,
61934 - (struct timespec __user *) &ts);
61935 + (struct timespec __force_user *) &ts);
61936 set_fs(oldfs);
61937 return err;
61938 }
61939 @@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
61940 oldfs = get_fs();
61941 set_fs(KERNEL_DS);
61942 err = sys_clock_gettime(which_clock,
61943 - (struct timespec __user *) &ts);
61944 + (struct timespec __force_user *) &ts);
61945 set_fs(oldfs);
61946 if (!err && put_compat_timespec(&ts, tp))
61947 return -EFAULT;
61948 @@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
61949
61950 oldfs = get_fs();
61951 set_fs(KERNEL_DS);
61952 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
61953 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
61954 set_fs(oldfs);
61955
61956 err = compat_put_timex(utp, &txc);
61957 @@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
61958 oldfs = get_fs();
61959 set_fs(KERNEL_DS);
61960 err = sys_clock_getres(which_clock,
61961 - (struct timespec __user *) &ts);
61962 + (struct timespec __force_user *) &ts);
61963 set_fs(oldfs);
61964 if (!err && tp && put_compat_timespec(&ts, tp))
61965 return -EFAULT;
61966 @@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
61967 long err;
61968 mm_segment_t oldfs;
61969 struct timespec tu;
61970 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
61971 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
61972
61973 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
61974 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
61975 oldfs = get_fs();
61976 set_fs(KERNEL_DS);
61977 err = clock_nanosleep_restart(restart);
61978 @@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
61979 oldfs = get_fs();
61980 set_fs(KERNEL_DS);
61981 err = sys_clock_nanosleep(which_clock, flags,
61982 - (struct timespec __user *) &in,
61983 - (struct timespec __user *) &out);
61984 + (struct timespec __force_user *) &in,
61985 + (struct timespec __force_user *) &out);
61986 set_fs(oldfs);
61987
61988 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
61989 diff -urNp linux-3.0.7/kernel/configs.c linux-3.0.7/kernel/configs.c
61990 --- linux-3.0.7/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
61991 +++ linux-3.0.7/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
61992 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
61993 struct proc_dir_entry *entry;
61994
61995 /* create the current config file */
61996 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
61997 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
61998 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
61999 + &ikconfig_file_ops);
62000 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62001 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62002 + &ikconfig_file_ops);
62003 +#endif
62004 +#else
62005 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62006 &ikconfig_file_ops);
62007 +#endif
62008 +
62009 if (!entry)
62010 return -ENOMEM;
62011
62012 diff -urNp linux-3.0.7/kernel/cred.c linux-3.0.7/kernel/cred.c
62013 --- linux-3.0.7/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
62014 +++ linux-3.0.7/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
62015 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62016 */
62017 void __put_cred(struct cred *cred)
62018 {
62019 + pax_track_stack();
62020 +
62021 kdebug("__put_cred(%p{%d,%d})", cred,
62022 atomic_read(&cred->usage),
62023 read_cred_subscribers(cred));
62024 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62025 {
62026 struct cred *cred;
62027
62028 + pax_track_stack();
62029 +
62030 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62031 atomic_read(&tsk->cred->usage),
62032 read_cred_subscribers(tsk->cred));
62033 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62034 {
62035 const struct cred *cred;
62036
62037 + pax_track_stack();
62038 +
62039 rcu_read_lock();
62040
62041 do {
62042 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62043 {
62044 struct cred *new;
62045
62046 + pax_track_stack();
62047 +
62048 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62049 if (!new)
62050 return NULL;
62051 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62052 const struct cred *old;
62053 struct cred *new;
62054
62055 + pax_track_stack();
62056 +
62057 validate_process_creds();
62058
62059 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62060 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62061 struct thread_group_cred *tgcred = NULL;
62062 struct cred *new;
62063
62064 + pax_track_stack();
62065 +
62066 #ifdef CONFIG_KEYS
62067 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62068 if (!tgcred)
62069 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62070 struct cred *new;
62071 int ret;
62072
62073 + pax_track_stack();
62074 +
62075 if (
62076 #ifdef CONFIG_KEYS
62077 !p->cred->thread_keyring &&
62078 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62079 struct task_struct *task = current;
62080 const struct cred *old = task->real_cred;
62081
62082 + pax_track_stack();
62083 +
62084 kdebug("commit_creds(%p{%d,%d})", new,
62085 atomic_read(&new->usage),
62086 read_cred_subscribers(new));
62087 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62088
62089 get_cred(new); /* we will require a ref for the subj creds too */
62090
62091 + gr_set_role_label(task, new->uid, new->gid);
62092 +
62093 /* dumpability changes */
62094 if (old->euid != new->euid ||
62095 old->egid != new->egid ||
62096 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
62097 key_fsgid_changed(task);
62098
62099 /* do it
62100 - * - What if a process setreuid()'s and this brings the
62101 - * new uid over his NPROC rlimit? We can check this now
62102 - * cheaply with the new uid cache, so if it matters
62103 - * we should be checking for it. -DaveM
62104 + * RLIMIT_NPROC limits on user->processes have already been checked
62105 + * in set_user().
62106 */
62107 alter_cred_subscribers(new, 2);
62108 if (new->user != old->user)
62109 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62110 */
62111 void abort_creds(struct cred *new)
62112 {
62113 + pax_track_stack();
62114 +
62115 kdebug("abort_creds(%p{%d,%d})", new,
62116 atomic_read(&new->usage),
62117 read_cred_subscribers(new));
62118 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
62119 {
62120 const struct cred *old = current->cred;
62121
62122 + pax_track_stack();
62123 +
62124 kdebug("override_creds(%p{%d,%d})", new,
62125 atomic_read(&new->usage),
62126 read_cred_subscribers(new));
62127 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
62128 {
62129 const struct cred *override = current->cred;
62130
62131 + pax_track_stack();
62132 +
62133 kdebug("revert_creds(%p{%d,%d})", old,
62134 atomic_read(&old->usage),
62135 read_cred_subscribers(old));
62136 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62137 const struct cred *old;
62138 struct cred *new;
62139
62140 + pax_track_stack();
62141 +
62142 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62143 if (!new)
62144 return NULL;
62145 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62146 */
62147 int set_security_override(struct cred *new, u32 secid)
62148 {
62149 + pax_track_stack();
62150 +
62151 return security_kernel_act_as(new, secid);
62152 }
62153 EXPORT_SYMBOL(set_security_override);
62154 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
62155 u32 secid;
62156 int ret;
62157
62158 + pax_track_stack();
62159 +
62160 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62161 if (ret < 0)
62162 return ret;
62163 diff -urNp linux-3.0.7/kernel/debug/debug_core.c linux-3.0.7/kernel/debug/debug_core.c
62164 --- linux-3.0.7/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
62165 +++ linux-3.0.7/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
62166 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62167 */
62168 static atomic_t masters_in_kgdb;
62169 static atomic_t slaves_in_kgdb;
62170 -static atomic_t kgdb_break_tasklet_var;
62171 +static atomic_unchecked_t kgdb_break_tasklet_var;
62172 atomic_t kgdb_setting_breakpoint;
62173
62174 struct task_struct *kgdb_usethread;
62175 @@ -129,7 +129,7 @@ int kgdb_single_step;
62176 static pid_t kgdb_sstep_pid;
62177
62178 /* to keep track of the CPU which is doing the single stepping*/
62179 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62180 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62181
62182 /*
62183 * If you are debugging a problem where roundup (the collection of
62184 @@ -542,7 +542,7 @@ return_normal:
62185 * kernel will only try for the value of sstep_tries before
62186 * giving up and continuing on.
62187 */
62188 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62189 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62190 (kgdb_info[cpu].task &&
62191 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62192 atomic_set(&kgdb_active, -1);
62193 @@ -636,8 +636,8 @@ cpu_master_loop:
62194 }
62195
62196 kgdb_restore:
62197 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62198 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62199 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62200 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62201 if (kgdb_info[sstep_cpu].task)
62202 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62203 else
62204 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62205 static void kgdb_tasklet_bpt(unsigned long ing)
62206 {
62207 kgdb_breakpoint();
62208 - atomic_set(&kgdb_break_tasklet_var, 0);
62209 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62210 }
62211
62212 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62213
62214 void kgdb_schedule_breakpoint(void)
62215 {
62216 - if (atomic_read(&kgdb_break_tasklet_var) ||
62217 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62218 atomic_read(&kgdb_active) != -1 ||
62219 atomic_read(&kgdb_setting_breakpoint))
62220 return;
62221 - atomic_inc(&kgdb_break_tasklet_var);
62222 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
62223 tasklet_schedule(&kgdb_tasklet_breakpoint);
62224 }
62225 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62226 diff -urNp linux-3.0.7/kernel/debug/kdb/kdb_main.c linux-3.0.7/kernel/debug/kdb/kdb_main.c
62227 --- linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
62228 +++ linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
62229 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62230 list_for_each_entry(mod, kdb_modules, list) {
62231
62232 kdb_printf("%-20s%8u 0x%p ", mod->name,
62233 - mod->core_size, (void *)mod);
62234 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
62235 #ifdef CONFIG_MODULE_UNLOAD
62236 kdb_printf("%4d ", module_refcount(mod));
62237 #endif
62238 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62239 kdb_printf(" (Loading)");
62240 else
62241 kdb_printf(" (Live)");
62242 - kdb_printf(" 0x%p", mod->module_core);
62243 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62244
62245 #ifdef CONFIG_MODULE_UNLOAD
62246 {
62247 diff -urNp linux-3.0.7/kernel/events/core.c linux-3.0.7/kernel/events/core.c
62248 --- linux-3.0.7/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
62249 +++ linux-3.0.7/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
62250 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
62251 return 0;
62252 }
62253
62254 -static atomic64_t perf_event_id;
62255 +static atomic64_unchecked_t perf_event_id;
62256
62257 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62258 enum event_type_t event_type);
62259 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
62260
62261 static inline u64 perf_event_count(struct perf_event *event)
62262 {
62263 - return local64_read(&event->count) + atomic64_read(&event->child_count);
62264 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62265 }
62266
62267 static u64 perf_event_read(struct perf_event *event)
62268 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
62269 mutex_lock(&event->child_mutex);
62270 total += perf_event_read(event);
62271 *enabled += event->total_time_enabled +
62272 - atomic64_read(&event->child_total_time_enabled);
62273 + atomic64_read_unchecked(&event->child_total_time_enabled);
62274 *running += event->total_time_running +
62275 - atomic64_read(&event->child_total_time_running);
62276 + atomic64_read_unchecked(&event->child_total_time_running);
62277
62278 list_for_each_entry(child, &event->child_list, child_list) {
62279 total += perf_event_read(child);
62280 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
62281 userpg->offset -= local64_read(&event->hw.prev_count);
62282
62283 userpg->time_enabled = event->total_time_enabled +
62284 - atomic64_read(&event->child_total_time_enabled);
62285 + atomic64_read_unchecked(&event->child_total_time_enabled);
62286
62287 userpg->time_running = event->total_time_running +
62288 - atomic64_read(&event->child_total_time_running);
62289 + atomic64_read_unchecked(&event->child_total_time_running);
62290
62291 barrier();
62292 ++userpg->lock;
62293 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
62294 values[n++] = perf_event_count(event);
62295 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62296 values[n++] = enabled +
62297 - atomic64_read(&event->child_total_time_enabled);
62298 + atomic64_read_unchecked(&event->child_total_time_enabled);
62299 }
62300 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62301 values[n++] = running +
62302 - atomic64_read(&event->child_total_time_running);
62303 + atomic64_read_unchecked(&event->child_total_time_running);
62304 }
62305 if (read_format & PERF_FORMAT_ID)
62306 values[n++] = primary_event_id(event);
62307 @@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
62308 * need to add enough zero bytes after the string to handle
62309 * the 64bit alignment we do later.
62310 */
62311 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62312 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
62313 if (!buf) {
62314 name = strncpy(tmp, "//enomem", sizeof(tmp));
62315 goto got_name;
62316 }
62317 - name = d_path(&file->f_path, buf, PATH_MAX);
62318 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62319 if (IS_ERR(name)) {
62320 name = strncpy(tmp, "//toolong", sizeof(tmp));
62321 goto got_name;
62322 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
62323 event->parent = parent_event;
62324
62325 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62326 - event->id = atomic64_inc_return(&perf_event_id);
62327 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
62328
62329 event->state = PERF_EVENT_STATE_INACTIVE;
62330
62331 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
62332 /*
62333 * Add back the child's count to the parent's count:
62334 */
62335 - atomic64_add(child_val, &parent_event->child_count);
62336 - atomic64_add(child_event->total_time_enabled,
62337 + atomic64_add_unchecked(child_val, &parent_event->child_count);
62338 + atomic64_add_unchecked(child_event->total_time_enabled,
62339 &parent_event->child_total_time_enabled);
62340 - atomic64_add(child_event->total_time_running,
62341 + atomic64_add_unchecked(child_event->total_time_running,
62342 &parent_event->child_total_time_running);
62343
62344 /*
62345 diff -urNp linux-3.0.7/kernel/exit.c linux-3.0.7/kernel/exit.c
62346 --- linux-3.0.7/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
62347 +++ linux-3.0.7/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
62348 @@ -57,6 +57,10 @@
62349 #include <asm/pgtable.h>
62350 #include <asm/mmu_context.h>
62351
62352 +#ifdef CONFIG_GRKERNSEC
62353 +extern rwlock_t grsec_exec_file_lock;
62354 +#endif
62355 +
62356 static void exit_mm(struct task_struct * tsk);
62357
62358 static void __unhash_process(struct task_struct *p, bool group_dead)
62359 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
62360 struct task_struct *leader;
62361 int zap_leader;
62362 repeat:
62363 +#ifdef CONFIG_NET
62364 + gr_del_task_from_ip_table(p);
62365 +#endif
62366 +
62367 tracehook_prepare_release_task(p);
62368 /* don't need to get the RCU readlock here - the process is dead and
62369 * can't be modifying its own credentials. But shut RCU-lockdep up */
62370 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
62371 {
62372 write_lock_irq(&tasklist_lock);
62373
62374 +#ifdef CONFIG_GRKERNSEC
62375 + write_lock(&grsec_exec_file_lock);
62376 + if (current->exec_file) {
62377 + fput(current->exec_file);
62378 + current->exec_file = NULL;
62379 + }
62380 + write_unlock(&grsec_exec_file_lock);
62381 +#endif
62382 +
62383 ptrace_unlink(current);
62384 /* Reparent to init */
62385 current->real_parent = current->parent = kthreadd_task;
62386 list_move_tail(&current->sibling, &current->real_parent->children);
62387
62388 + gr_set_kernel_label(current);
62389 +
62390 /* Set the exit signal to SIGCHLD so we signal init on exit */
62391 current->exit_signal = SIGCHLD;
62392
62393 @@ -394,7 +413,7 @@ int allow_signal(int sig)
62394 * know it'll be handled, so that they don't get converted to
62395 * SIGKILL or just silently dropped.
62396 */
62397 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62398 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62399 recalc_sigpending();
62400 spin_unlock_irq(&current->sighand->siglock);
62401 return 0;
62402 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
62403 vsnprintf(current->comm, sizeof(current->comm), name, args);
62404 va_end(args);
62405
62406 +#ifdef CONFIG_GRKERNSEC
62407 + write_lock(&grsec_exec_file_lock);
62408 + if (current->exec_file) {
62409 + fput(current->exec_file);
62410 + current->exec_file = NULL;
62411 + }
62412 + write_unlock(&grsec_exec_file_lock);
62413 +#endif
62414 +
62415 + gr_set_kernel_label(current);
62416 +
62417 /*
62418 * If we were started as result of loading a module, close all of the
62419 * user space pages. We don't need them, and if we didn't close them
62420 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
62421 struct task_struct *tsk = current;
62422 int group_dead;
62423
62424 - profile_task_exit(tsk);
62425 -
62426 - WARN_ON(atomic_read(&tsk->fs_excl));
62427 - WARN_ON(blk_needs_flush_plug(tsk));
62428 -
62429 if (unlikely(in_interrupt()))
62430 panic("Aiee, killing interrupt handler!");
62431 - if (unlikely(!tsk->pid))
62432 - panic("Attempted to kill the idle task!");
62433
62434 /*
62435 * If do_exit is called because this processes oopsed, it's possible
62436 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
62437 */
62438 set_fs(USER_DS);
62439
62440 + profile_task_exit(tsk);
62441 +
62442 + WARN_ON(atomic_read(&tsk->fs_excl));
62443 + WARN_ON(blk_needs_flush_plug(tsk));
62444 +
62445 + if (unlikely(!tsk->pid))
62446 + panic("Attempted to kill the idle task!");
62447 +
62448 tracehook_report_exit(&code);
62449
62450 validate_creds_for_do_exit(tsk);
62451 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
62452 tsk->exit_code = code;
62453 taskstats_exit(tsk, group_dead);
62454
62455 + gr_acl_handle_psacct(tsk, code);
62456 + gr_acl_handle_exit();
62457 +
62458 exit_mm(tsk);
62459
62460 if (group_dead)
62461 diff -urNp linux-3.0.7/kernel/fork.c linux-3.0.7/kernel/fork.c
62462 --- linux-3.0.7/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
62463 +++ linux-3.0.7/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
62464 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
62465 *stackend = STACK_END_MAGIC; /* for overflow detection */
62466
62467 #ifdef CONFIG_CC_STACKPROTECTOR
62468 - tsk->stack_canary = get_random_int();
62469 + tsk->stack_canary = pax_get_random_long();
62470 #endif
62471
62472 /* One for us, one for whoever does the "release_task()" (usually parent) */
62473 @@ -308,13 +308,77 @@ out:
62474 }
62475
62476 #ifdef CONFIG_MMU
62477 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62478 +{
62479 + struct vm_area_struct *tmp;
62480 + unsigned long charge;
62481 + struct mempolicy *pol;
62482 + struct file *file;
62483 +
62484 + charge = 0;
62485 + if (mpnt->vm_flags & VM_ACCOUNT) {
62486 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62487 + if (security_vm_enough_memory(len))
62488 + goto fail_nomem;
62489 + charge = len;
62490 + }
62491 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62492 + if (!tmp)
62493 + goto fail_nomem;
62494 + *tmp = *mpnt;
62495 + tmp->vm_mm = mm;
62496 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
62497 + pol = mpol_dup(vma_policy(mpnt));
62498 + if (IS_ERR(pol))
62499 + goto fail_nomem_policy;
62500 + vma_set_policy(tmp, pol);
62501 + if (anon_vma_fork(tmp, mpnt))
62502 + goto fail_nomem_anon_vma_fork;
62503 + tmp->vm_flags &= ~VM_LOCKED;
62504 + tmp->vm_next = tmp->vm_prev = NULL;
62505 + tmp->vm_mirror = NULL;
62506 + file = tmp->vm_file;
62507 + if (file) {
62508 + struct inode *inode = file->f_path.dentry->d_inode;
62509 + struct address_space *mapping = file->f_mapping;
62510 +
62511 + get_file(file);
62512 + if (tmp->vm_flags & VM_DENYWRITE)
62513 + atomic_dec(&inode->i_writecount);
62514 + mutex_lock(&mapping->i_mmap_mutex);
62515 + if (tmp->vm_flags & VM_SHARED)
62516 + mapping->i_mmap_writable++;
62517 + flush_dcache_mmap_lock(mapping);
62518 + /* insert tmp into the share list, just after mpnt */
62519 + vma_prio_tree_add(tmp, mpnt);
62520 + flush_dcache_mmap_unlock(mapping);
62521 + mutex_unlock(&mapping->i_mmap_mutex);
62522 + }
62523 +
62524 + /*
62525 + * Clear hugetlb-related page reserves for children. This only
62526 + * affects MAP_PRIVATE mappings. Faults generated by the child
62527 + * are not guaranteed to succeed, even if read-only
62528 + */
62529 + if (is_vm_hugetlb_page(tmp))
62530 + reset_vma_resv_huge_pages(tmp);
62531 +
62532 + return tmp;
62533 +
62534 +fail_nomem_anon_vma_fork:
62535 + mpol_put(pol);
62536 +fail_nomem_policy:
62537 + kmem_cache_free(vm_area_cachep, tmp);
62538 +fail_nomem:
62539 + vm_unacct_memory(charge);
62540 + return NULL;
62541 +}
62542 +
62543 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62544 {
62545 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62546 struct rb_node **rb_link, *rb_parent;
62547 int retval;
62548 - unsigned long charge;
62549 - struct mempolicy *pol;
62550
62551 down_write(&oldmm->mmap_sem);
62552 flush_cache_dup_mm(oldmm);
62553 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
62554 mm->locked_vm = 0;
62555 mm->mmap = NULL;
62556 mm->mmap_cache = NULL;
62557 - mm->free_area_cache = oldmm->mmap_base;
62558 - mm->cached_hole_size = ~0UL;
62559 + mm->free_area_cache = oldmm->free_area_cache;
62560 + mm->cached_hole_size = oldmm->cached_hole_size;
62561 mm->map_count = 0;
62562 cpumask_clear(mm_cpumask(mm));
62563 mm->mm_rb = RB_ROOT;
62564 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
62565
62566 prev = NULL;
62567 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
62568 - struct file *file;
62569 -
62570 if (mpnt->vm_flags & VM_DONTCOPY) {
62571 long pages = vma_pages(mpnt);
62572 mm->total_vm -= pages;
62573 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
62574 -pages);
62575 continue;
62576 }
62577 - charge = 0;
62578 - if (mpnt->vm_flags & VM_ACCOUNT) {
62579 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62580 - if (security_vm_enough_memory(len))
62581 - goto fail_nomem;
62582 - charge = len;
62583 - }
62584 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62585 - if (!tmp)
62586 - goto fail_nomem;
62587 - *tmp = *mpnt;
62588 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
62589 - pol = mpol_dup(vma_policy(mpnt));
62590 - retval = PTR_ERR(pol);
62591 - if (IS_ERR(pol))
62592 - goto fail_nomem_policy;
62593 - vma_set_policy(tmp, pol);
62594 - tmp->vm_mm = mm;
62595 - if (anon_vma_fork(tmp, mpnt))
62596 - goto fail_nomem_anon_vma_fork;
62597 - tmp->vm_flags &= ~VM_LOCKED;
62598 - tmp->vm_next = tmp->vm_prev = NULL;
62599 - file = tmp->vm_file;
62600 - if (file) {
62601 - struct inode *inode = file->f_path.dentry->d_inode;
62602 - struct address_space *mapping = file->f_mapping;
62603 -
62604 - get_file(file);
62605 - if (tmp->vm_flags & VM_DENYWRITE)
62606 - atomic_dec(&inode->i_writecount);
62607 - mutex_lock(&mapping->i_mmap_mutex);
62608 - if (tmp->vm_flags & VM_SHARED)
62609 - mapping->i_mmap_writable++;
62610 - flush_dcache_mmap_lock(mapping);
62611 - /* insert tmp into the share list, just after mpnt */
62612 - vma_prio_tree_add(tmp, mpnt);
62613 - flush_dcache_mmap_unlock(mapping);
62614 - mutex_unlock(&mapping->i_mmap_mutex);
62615 + tmp = dup_vma(mm, mpnt);
62616 + if (!tmp) {
62617 + retval = -ENOMEM;
62618 + goto out;
62619 }
62620
62621 /*
62622 - * Clear hugetlb-related page reserves for children. This only
62623 - * affects MAP_PRIVATE mappings. Faults generated by the child
62624 - * are not guaranteed to succeed, even if read-only
62625 - */
62626 - if (is_vm_hugetlb_page(tmp))
62627 - reset_vma_resv_huge_pages(tmp);
62628 -
62629 - /*
62630 * Link in the new vma and copy the page table entries.
62631 */
62632 *pprev = tmp;
62633 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
62634 if (retval)
62635 goto out;
62636 }
62637 +
62638 +#ifdef CONFIG_PAX_SEGMEXEC
62639 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62640 + struct vm_area_struct *mpnt_m;
62641 +
62642 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62643 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62644 +
62645 + if (!mpnt->vm_mirror)
62646 + continue;
62647 +
62648 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62649 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62650 + mpnt->vm_mirror = mpnt_m;
62651 + } else {
62652 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62653 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62654 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62655 + mpnt->vm_mirror->vm_mirror = mpnt;
62656 + }
62657 + }
62658 + BUG_ON(mpnt_m);
62659 + }
62660 +#endif
62661 +
62662 /* a new mm has just been created */
62663 arch_dup_mmap(oldmm, mm);
62664 retval = 0;
62665 @@ -429,14 +474,6 @@ out:
62666 flush_tlb_mm(oldmm);
62667 up_write(&oldmm->mmap_sem);
62668 return retval;
62669 -fail_nomem_anon_vma_fork:
62670 - mpol_put(pol);
62671 -fail_nomem_policy:
62672 - kmem_cache_free(vm_area_cachep, tmp);
62673 -fail_nomem:
62674 - retval = -ENOMEM;
62675 - vm_unacct_memory(charge);
62676 - goto out;
62677 }
62678
62679 static inline int mm_alloc_pgd(struct mm_struct * mm)
62680 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
62681 spin_unlock(&fs->lock);
62682 return -EAGAIN;
62683 }
62684 - fs->users++;
62685 + atomic_inc(&fs->users);
62686 spin_unlock(&fs->lock);
62687 return 0;
62688 }
62689 tsk->fs = copy_fs_struct(fs);
62690 if (!tsk->fs)
62691 return -ENOMEM;
62692 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62693 return 0;
62694 }
62695
62696 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
62697 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62698 #endif
62699 retval = -EAGAIN;
62700 +
62701 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62702 +
62703 if (atomic_read(&p->real_cred->user->processes) >=
62704 task_rlimit(p, RLIMIT_NPROC)) {
62705 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62706 - p->real_cred->user != INIT_USER)
62707 + if (p->real_cred->user != INIT_USER &&
62708 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
62709 goto bad_fork_free;
62710 }
62711 + current->flags &= ~PF_NPROC_EXCEEDED;
62712
62713 retval = copy_creds(p, clone_flags);
62714 if (retval < 0)
62715 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
62716 if (clone_flags & CLONE_THREAD)
62717 p->tgid = current->tgid;
62718
62719 + gr_copy_label(p);
62720 +
62721 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62722 /*
62723 * Clear TID on mm_release()?
62724 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
62725 bad_fork_free:
62726 free_task(p);
62727 fork_out:
62728 + gr_log_forkfail(retval);
62729 +
62730 return ERR_PTR(retval);
62731 }
62732
62733 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
62734 if (clone_flags & CLONE_PARENT_SETTID)
62735 put_user(nr, parent_tidptr);
62736
62737 + gr_handle_brute_check();
62738 +
62739 if (clone_flags & CLONE_VFORK) {
62740 p->vfork_done = &vfork;
62741 init_completion(&vfork);
62742 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
62743 return 0;
62744
62745 /* don't need lock here; in the worst case we'll do useless copy */
62746 - if (fs->users == 1)
62747 + if (atomic_read(&fs->users) == 1)
62748 return 0;
62749
62750 *new_fsp = copy_fs_struct(fs);
62751 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62752 fs = current->fs;
62753 spin_lock(&fs->lock);
62754 current->fs = new_fs;
62755 - if (--fs->users)
62756 + gr_set_chroot_entries(current, &current->fs->root);
62757 + if (atomic_dec_return(&fs->users))
62758 new_fs = NULL;
62759 else
62760 new_fs = fs;
62761 diff -urNp linux-3.0.7/kernel/futex.c linux-3.0.7/kernel/futex.c
62762 --- linux-3.0.7/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
62763 +++ linux-3.0.7/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
62764 @@ -54,6 +54,7 @@
62765 #include <linux/mount.h>
62766 #include <linux/pagemap.h>
62767 #include <linux/syscalls.h>
62768 +#include <linux/ptrace.h>
62769 #include <linux/signal.h>
62770 #include <linux/module.h>
62771 #include <linux/magic.h>
62772 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62773 struct page *page, *page_head;
62774 int err, ro = 0;
62775
62776 +#ifdef CONFIG_PAX_SEGMEXEC
62777 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62778 + return -EFAULT;
62779 +#endif
62780 +
62781 /*
62782 * The futex address must be "naturally" aligned.
62783 */
62784 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
62785 struct futex_q q = futex_q_init;
62786 int ret;
62787
62788 + pax_track_stack();
62789 +
62790 if (!bitset)
62791 return -EINVAL;
62792 q.bitset = bitset;
62793 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
62794 struct futex_q q = futex_q_init;
62795 int res, ret;
62796
62797 + pax_track_stack();
62798 +
62799 if (!bitset)
62800 return -EINVAL;
62801
62802 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62803 {
62804 struct robust_list_head __user *head;
62805 unsigned long ret;
62806 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62807 const struct cred *cred = current_cred(), *pcred;
62808 +#endif
62809
62810 if (!futex_cmpxchg_enabled)
62811 return -ENOSYS;
62812 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62813 if (!p)
62814 goto err_unlock;
62815 ret = -EPERM;
62816 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62817 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62818 + goto err_unlock;
62819 +#else
62820 pcred = __task_cred(p);
62821 /* If victim is in different user_ns, then uids are not
62822 comparable, so we must have CAP_SYS_PTRACE */
62823 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62824 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62825 goto err_unlock;
62826 ok:
62827 +#endif
62828 head = p->robust_list;
62829 rcu_read_unlock();
62830 }
62831 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
62832 {
62833 u32 curval;
62834 int i;
62835 + mm_segment_t oldfs;
62836
62837 /*
62838 * This will fail and we want it. Some arch implementations do
62839 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
62840 * implementation, the non-functional ones will return
62841 * -ENOSYS.
62842 */
62843 + oldfs = get_fs();
62844 + set_fs(USER_DS);
62845 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
62846 futex_cmpxchg_enabled = 1;
62847 + set_fs(oldfs);
62848
62849 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
62850 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
62851 diff -urNp linux-3.0.7/kernel/futex_compat.c linux-3.0.7/kernel/futex_compat.c
62852 --- linux-3.0.7/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
62853 +++ linux-3.0.7/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
62854 @@ -10,6 +10,7 @@
62855 #include <linux/compat.h>
62856 #include <linux/nsproxy.h>
62857 #include <linux/futex.h>
62858 +#include <linux/ptrace.h>
62859
62860 #include <asm/uaccess.h>
62861
62862 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
62863 {
62864 struct compat_robust_list_head __user *head;
62865 unsigned long ret;
62866 - const struct cred *cred = current_cred(), *pcred;
62867 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62868 + const struct cred *cred = current_cred();
62869 + const struct cred *pcred;
62870 +#endif
62871
62872 if (!futex_cmpxchg_enabled)
62873 return -ENOSYS;
62874 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
62875 if (!p)
62876 goto err_unlock;
62877 ret = -EPERM;
62878 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62879 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62880 + goto err_unlock;
62881 +#else
62882 pcred = __task_cred(p);
62883 /* If victim is in different user_ns, then uids are not
62884 comparable, so we must have CAP_SYS_PTRACE */
62885 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
62886 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62887 goto err_unlock;
62888 ok:
62889 +#endif
62890 head = p->compat_robust_list;
62891 rcu_read_unlock();
62892 }
62893 diff -urNp linux-3.0.7/kernel/gcov/base.c linux-3.0.7/kernel/gcov/base.c
62894 --- linux-3.0.7/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
62895 +++ linux-3.0.7/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
62896 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
62897 }
62898
62899 #ifdef CONFIG_MODULES
62900 -static inline int within(void *addr, void *start, unsigned long size)
62901 -{
62902 - return ((addr >= start) && (addr < start + size));
62903 -}
62904 -
62905 /* Update list and generate events when modules are unloaded. */
62906 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62907 void *data)
62908 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62909 prev = NULL;
62910 /* Remove entries located in module from linked list. */
62911 for (info = gcov_info_head; info; info = info->next) {
62912 - if (within(info, mod->module_core, mod->core_size)) {
62913 + if (within_module_core_rw((unsigned long)info, mod)) {
62914 if (prev)
62915 prev->next = info->next;
62916 else
62917 diff -urNp linux-3.0.7/kernel/hrtimer.c linux-3.0.7/kernel/hrtimer.c
62918 --- linux-3.0.7/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
62919 +++ linux-3.0.7/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
62920 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62921 local_irq_restore(flags);
62922 }
62923
62924 -static void run_hrtimer_softirq(struct softirq_action *h)
62925 +static void run_hrtimer_softirq(void)
62926 {
62927 hrtimer_peek_ahead_timers();
62928 }
62929 diff -urNp linux-3.0.7/kernel/jump_label.c linux-3.0.7/kernel/jump_label.c
62930 --- linux-3.0.7/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
62931 +++ linux-3.0.7/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
62932 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
62933
62934 size = (((unsigned long)stop - (unsigned long)start)
62935 / sizeof(struct jump_entry));
62936 + pax_open_kernel();
62937 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
62938 + pax_close_kernel();
62939 }
62940
62941 static void jump_label_update(struct jump_label_key *key, int enable);
62942 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
62943 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
62944 struct jump_entry *iter;
62945
62946 + pax_open_kernel();
62947 for (iter = iter_start; iter < iter_stop; iter++) {
62948 if (within_module_init(iter->code, mod))
62949 iter->code = 0;
62950 }
62951 + pax_close_kernel();
62952 }
62953
62954 static int
62955 diff -urNp linux-3.0.7/kernel/kallsyms.c linux-3.0.7/kernel/kallsyms.c
62956 --- linux-3.0.7/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
62957 +++ linux-3.0.7/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
62958 @@ -11,6 +11,9 @@
62959 * Changed the compression method from stem compression to "table lookup"
62960 * compression (see scripts/kallsyms.c for a more complete description)
62961 */
62962 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62963 +#define __INCLUDED_BY_HIDESYM 1
62964 +#endif
62965 #include <linux/kallsyms.h>
62966 #include <linux/module.h>
62967 #include <linux/init.h>
62968 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
62969
62970 static inline int is_kernel_inittext(unsigned long addr)
62971 {
62972 + if (system_state != SYSTEM_BOOTING)
62973 + return 0;
62974 +
62975 if (addr >= (unsigned long)_sinittext
62976 && addr <= (unsigned long)_einittext)
62977 return 1;
62978 return 0;
62979 }
62980
62981 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62982 +#ifdef CONFIG_MODULES
62983 +static inline int is_module_text(unsigned long addr)
62984 +{
62985 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62986 + return 1;
62987 +
62988 + addr = ktla_ktva(addr);
62989 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62990 +}
62991 +#else
62992 +static inline int is_module_text(unsigned long addr)
62993 +{
62994 + return 0;
62995 +}
62996 +#endif
62997 +#endif
62998 +
62999 static inline int is_kernel_text(unsigned long addr)
63000 {
63001 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63002 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63003
63004 static inline int is_kernel(unsigned long addr)
63005 {
63006 +
63007 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63008 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63009 + return 1;
63010 +
63011 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63012 +#else
63013 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63014 +#endif
63015 +
63016 return 1;
63017 return in_gate_area_no_mm(addr);
63018 }
63019
63020 static int is_ksym_addr(unsigned long addr)
63021 {
63022 +
63023 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63024 + if (is_module_text(addr))
63025 + return 0;
63026 +#endif
63027 +
63028 if (all_var)
63029 return is_kernel(addr);
63030
63031 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63032
63033 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63034 {
63035 - iter->name[0] = '\0';
63036 iter->nameoff = get_symbol_offset(new_pos);
63037 iter->pos = new_pos;
63038 }
63039 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63040 {
63041 struct kallsym_iter *iter = m->private;
63042
63043 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63044 + if (current_uid())
63045 + return 0;
63046 +#endif
63047 +
63048 /* Some debugging symbols have no name. Ignore them. */
63049 if (!iter->name[0])
63050 return 0;
63051 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63052 struct kallsym_iter *iter;
63053 int ret;
63054
63055 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63056 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63057 if (!iter)
63058 return -ENOMEM;
63059 reset_iter(iter, 0);
63060 diff -urNp linux-3.0.7/kernel/kexec.c linux-3.0.7/kernel/kexec.c
63061 --- linux-3.0.7/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
63062 +++ linux-3.0.7/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
63063 @@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63064 unsigned long flags)
63065 {
63066 struct compat_kexec_segment in;
63067 - struct kexec_segment out, __user *ksegments;
63068 + struct kexec_segment out;
63069 + struct kexec_segment __user *ksegments;
63070 unsigned long i, result;
63071
63072 /* Don't allow clients that don't understand the native
63073 diff -urNp linux-3.0.7/kernel/kmod.c linux-3.0.7/kernel/kmod.c
63074 --- linux-3.0.7/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
63075 +++ linux-3.0.7/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
63076 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63077 * If module auto-loading support is disabled then this function
63078 * becomes a no-operation.
63079 */
63080 -int __request_module(bool wait, const char *fmt, ...)
63081 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63082 {
63083 - va_list args;
63084 char module_name[MODULE_NAME_LEN];
63085 unsigned int max_modprobes;
63086 int ret;
63087 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63088 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63089 static char *envp[] = { "HOME=/",
63090 "TERM=linux",
63091 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63092 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63093 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63094 static int kmod_loop_msg;
63095
63096 - va_start(args, fmt);
63097 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63098 - va_end(args);
63099 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63100 if (ret >= MODULE_NAME_LEN)
63101 return -ENAMETOOLONG;
63102
63103 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63104 if (ret)
63105 return ret;
63106
63107 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63108 + if (!current_uid()) {
63109 + /* hack to workaround consolekit/udisks stupidity */
63110 + read_lock(&tasklist_lock);
63111 + if (!strcmp(current->comm, "mount") &&
63112 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63113 + read_unlock(&tasklist_lock);
63114 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63115 + return -EPERM;
63116 + }
63117 + read_unlock(&tasklist_lock);
63118 + }
63119 +#endif
63120 +
63121 /* If modprobe needs a service that is in a module, we get a recursive
63122 * loop. Limit the number of running kmod threads to max_threads/2 or
63123 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63124 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
63125 atomic_dec(&kmod_concurrent);
63126 return ret;
63127 }
63128 +
63129 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63130 +{
63131 + va_list args;
63132 + int ret;
63133 +
63134 + va_start(args, fmt);
63135 + ret = ____request_module(wait, module_param, fmt, args);
63136 + va_end(args);
63137 +
63138 + return ret;
63139 +}
63140 +
63141 +int __request_module(bool wait, const char *fmt, ...)
63142 +{
63143 + va_list args;
63144 + int ret;
63145 +
63146 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63147 + if (current_uid()) {
63148 + char module_param[MODULE_NAME_LEN];
63149 +
63150 + memset(module_param, 0, sizeof(module_param));
63151 +
63152 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63153 +
63154 + va_start(args, fmt);
63155 + ret = ____request_module(wait, module_param, fmt, args);
63156 + va_end(args);
63157 +
63158 + return ret;
63159 + }
63160 +#endif
63161 +
63162 + va_start(args, fmt);
63163 + ret = ____request_module(wait, NULL, fmt, args);
63164 + va_end(args);
63165 +
63166 + return ret;
63167 +}
63168 +
63169 EXPORT_SYMBOL(__request_module);
63170 #endif /* CONFIG_MODULES */
63171
63172 @@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
63173 *
63174 * Thus the __user pointer cast is valid here.
63175 */
63176 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
63177 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63178
63179 /*
63180 * If ret is 0, either ____call_usermodehelper failed and the
63181 diff -urNp linux-3.0.7/kernel/kprobes.c linux-3.0.7/kernel/kprobes.c
63182 --- linux-3.0.7/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
63183 +++ linux-3.0.7/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
63184 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63185 * kernel image and loaded module images reside. This is required
63186 * so x86_64 can correctly handle the %rip-relative fixups.
63187 */
63188 - kip->insns = module_alloc(PAGE_SIZE);
63189 + kip->insns = module_alloc_exec(PAGE_SIZE);
63190 if (!kip->insns) {
63191 kfree(kip);
63192 return NULL;
63193 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63194 */
63195 if (!list_is_singular(&kip->list)) {
63196 list_del(&kip->list);
63197 - module_free(NULL, kip->insns);
63198 + module_free_exec(NULL, kip->insns);
63199 kfree(kip);
63200 }
63201 return 1;
63202 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
63203 {
63204 int i, err = 0;
63205 unsigned long offset = 0, size = 0;
63206 - char *modname, namebuf[128];
63207 + char *modname, namebuf[KSYM_NAME_LEN];
63208 const char *symbol_name;
63209 void *addr;
63210 struct kprobe_blackpoint *kb;
63211 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
63212 const char *sym = NULL;
63213 unsigned int i = *(loff_t *) v;
63214 unsigned long offset = 0;
63215 - char *modname, namebuf[128];
63216 + char *modname, namebuf[KSYM_NAME_LEN];
63217
63218 head = &kprobe_table[i];
63219 preempt_disable();
63220 diff -urNp linux-3.0.7/kernel/lockdep.c linux-3.0.7/kernel/lockdep.c
63221 --- linux-3.0.7/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
63222 +++ linux-3.0.7/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
63223 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
63224 end = (unsigned long) &_end,
63225 addr = (unsigned long) obj;
63226
63227 +#ifdef CONFIG_PAX_KERNEXEC
63228 + start = ktla_ktva(start);
63229 +#endif
63230 +
63231 /*
63232 * static variable?
63233 */
63234 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63235 if (!static_obj(lock->key)) {
63236 debug_locks_off();
63237 printk("INFO: trying to register non-static key.\n");
63238 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63239 printk("the code is fine but needs lockdep annotation.\n");
63240 printk("turning off the locking correctness validator.\n");
63241 dump_stack();
63242 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
63243 if (!class)
63244 return 0;
63245 }
63246 - atomic_inc((atomic_t *)&class->ops);
63247 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63248 if (very_verbose(class)) {
63249 printk("\nacquire class [%p] %s", class->key, class->name);
63250 if (class->name_version > 1)
63251 diff -urNp linux-3.0.7/kernel/lockdep_proc.c linux-3.0.7/kernel/lockdep_proc.c
63252 --- linux-3.0.7/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
63253 +++ linux-3.0.7/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
63254 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63255
63256 static void print_name(struct seq_file *m, struct lock_class *class)
63257 {
63258 - char str[128];
63259 + char str[KSYM_NAME_LEN];
63260 const char *name = class->name;
63261
63262 if (!name) {
63263 diff -urNp linux-3.0.7/kernel/module.c linux-3.0.7/kernel/module.c
63264 --- linux-3.0.7/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
63265 +++ linux-3.0.7/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
63266 @@ -58,6 +58,7 @@
63267 #include <linux/jump_label.h>
63268 #include <linux/pfn.h>
63269 #include <linux/bsearch.h>
63270 +#include <linux/grsecurity.h>
63271
63272 #define CREATE_TRACE_POINTS
63273 #include <trace/events/module.h>
63274 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63275
63276 /* Bounds of module allocation, for speeding __module_address.
63277 * Protected by module_mutex. */
63278 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63279 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63280 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63281
63282 int register_module_notifier(struct notifier_block * nb)
63283 {
63284 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63285 return true;
63286
63287 list_for_each_entry_rcu(mod, &modules, list) {
63288 - struct symsearch arr[] = {
63289 + struct symsearch modarr[] = {
63290 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63291 NOT_GPL_ONLY, false },
63292 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63293 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63294 #endif
63295 };
63296
63297 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63298 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63299 return true;
63300 }
63301 return false;
63302 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63303 static int percpu_modalloc(struct module *mod,
63304 unsigned long size, unsigned long align)
63305 {
63306 - if (align > PAGE_SIZE) {
63307 + if (align-1 >= PAGE_SIZE) {
63308 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63309 mod->name, align, PAGE_SIZE);
63310 align = PAGE_SIZE;
63311 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
63312 */
63313 #ifdef CONFIG_SYSFS
63314
63315 -#ifdef CONFIG_KALLSYMS
63316 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63317 static inline bool sect_empty(const Elf_Shdr *sect)
63318 {
63319 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63320 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
63321
63322 static void unset_module_core_ro_nx(struct module *mod)
63323 {
63324 - set_page_attributes(mod->module_core + mod->core_text_size,
63325 - mod->module_core + mod->core_size,
63326 + set_page_attributes(mod->module_core_rw,
63327 + mod->module_core_rw + mod->core_size_rw,
63328 set_memory_x);
63329 - set_page_attributes(mod->module_core,
63330 - mod->module_core + mod->core_ro_size,
63331 + set_page_attributes(mod->module_core_rx,
63332 + mod->module_core_rx + mod->core_size_rx,
63333 set_memory_rw);
63334 }
63335
63336 static void unset_module_init_ro_nx(struct module *mod)
63337 {
63338 - set_page_attributes(mod->module_init + mod->init_text_size,
63339 - mod->module_init + mod->init_size,
63340 + set_page_attributes(mod->module_init_rw,
63341 + mod->module_init_rw + mod->init_size_rw,
63342 set_memory_x);
63343 - set_page_attributes(mod->module_init,
63344 - mod->module_init + mod->init_ro_size,
63345 + set_page_attributes(mod->module_init_rx,
63346 + mod->module_init_rx + mod->init_size_rx,
63347 set_memory_rw);
63348 }
63349
63350 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
63351
63352 mutex_lock(&module_mutex);
63353 list_for_each_entry_rcu(mod, &modules, list) {
63354 - if ((mod->module_core) && (mod->core_text_size)) {
63355 - set_page_attributes(mod->module_core,
63356 - mod->module_core + mod->core_text_size,
63357 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
63358 + set_page_attributes(mod->module_core_rx,
63359 + mod->module_core_rx + mod->core_size_rx,
63360 set_memory_rw);
63361 }
63362 - if ((mod->module_init) && (mod->init_text_size)) {
63363 - set_page_attributes(mod->module_init,
63364 - mod->module_init + mod->init_text_size,
63365 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
63366 + set_page_attributes(mod->module_init_rx,
63367 + mod->module_init_rx + mod->init_size_rx,
63368 set_memory_rw);
63369 }
63370 }
63371 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
63372
63373 mutex_lock(&module_mutex);
63374 list_for_each_entry_rcu(mod, &modules, list) {
63375 - if ((mod->module_core) && (mod->core_text_size)) {
63376 - set_page_attributes(mod->module_core,
63377 - mod->module_core + mod->core_text_size,
63378 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
63379 + set_page_attributes(mod->module_core_rx,
63380 + mod->module_core_rx + mod->core_size_rx,
63381 set_memory_ro);
63382 }
63383 - if ((mod->module_init) && (mod->init_text_size)) {
63384 - set_page_attributes(mod->module_init,
63385 - mod->module_init + mod->init_text_size,
63386 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
63387 + set_page_attributes(mod->module_init_rx,
63388 + mod->module_init_rx + mod->init_size_rx,
63389 set_memory_ro);
63390 }
63391 }
63392 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
63393
63394 /* This may be NULL, but that's OK */
63395 unset_module_init_ro_nx(mod);
63396 - module_free(mod, mod->module_init);
63397 + module_free(mod, mod->module_init_rw);
63398 + module_free_exec(mod, mod->module_init_rx);
63399 kfree(mod->args);
63400 percpu_modfree(mod);
63401
63402 /* Free lock-classes: */
63403 - lockdep_free_key_range(mod->module_core, mod->core_size);
63404 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63405 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63406
63407 /* Finally, free the core (containing the module structure) */
63408 unset_module_core_ro_nx(mod);
63409 - module_free(mod, mod->module_core);
63410 + module_free_exec(mod, mod->module_core_rx);
63411 + module_free(mod, mod->module_core_rw);
63412
63413 #ifdef CONFIG_MPU
63414 update_protections(current->mm);
63415 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
63416 unsigned int i;
63417 int ret = 0;
63418 const struct kernel_symbol *ksym;
63419 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63420 + int is_fs_load = 0;
63421 + int register_filesystem_found = 0;
63422 + char *p;
63423 +
63424 + p = strstr(mod->args, "grsec_modharden_fs");
63425 + if (p) {
63426 + char *endptr = p + strlen("grsec_modharden_fs");
63427 + /* copy \0 as well */
63428 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63429 + is_fs_load = 1;
63430 + }
63431 +#endif
63432
63433 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63434 const char *name = info->strtab + sym[i].st_name;
63435
63436 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63437 + /* it's a real shame this will never get ripped and copied
63438 + upstream! ;(
63439 + */
63440 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63441 + register_filesystem_found = 1;
63442 +#endif
63443 +
63444 switch (sym[i].st_shndx) {
63445 case SHN_COMMON:
63446 /* We compiled with -fno-common. These are not
63447 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
63448 ksym = resolve_symbol_wait(mod, info, name);
63449 /* Ok if resolved. */
63450 if (ksym && !IS_ERR(ksym)) {
63451 + pax_open_kernel();
63452 sym[i].st_value = ksym->value;
63453 + pax_close_kernel();
63454 break;
63455 }
63456
63457 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
63458 secbase = (unsigned long)mod_percpu(mod);
63459 else
63460 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63461 + pax_open_kernel();
63462 sym[i].st_value += secbase;
63463 + pax_close_kernel();
63464 break;
63465 }
63466 }
63467
63468 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63469 + if (is_fs_load && !register_filesystem_found) {
63470 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63471 + ret = -EPERM;
63472 + }
63473 +#endif
63474 +
63475 return ret;
63476 }
63477
63478 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
63479 || s->sh_entsize != ~0UL
63480 || strstarts(sname, ".init"))
63481 continue;
63482 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63483 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63484 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63485 + else
63486 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63487 DEBUGP("\t%s\n", name);
63488 }
63489 - switch (m) {
63490 - case 0: /* executable */
63491 - mod->core_size = debug_align(mod->core_size);
63492 - mod->core_text_size = mod->core_size;
63493 - break;
63494 - case 1: /* RO: text and ro-data */
63495 - mod->core_size = debug_align(mod->core_size);
63496 - mod->core_ro_size = mod->core_size;
63497 - break;
63498 - case 3: /* whole core */
63499 - mod->core_size = debug_align(mod->core_size);
63500 - break;
63501 - }
63502 }
63503
63504 DEBUGP("Init section allocation order:\n");
63505 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
63506 || s->sh_entsize != ~0UL
63507 || !strstarts(sname, ".init"))
63508 continue;
63509 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63510 - | INIT_OFFSET_MASK);
63511 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63512 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63513 + else
63514 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63515 + s->sh_entsize |= INIT_OFFSET_MASK;
63516 DEBUGP("\t%s\n", sname);
63517 }
63518 - switch (m) {
63519 - case 0: /* executable */
63520 - mod->init_size = debug_align(mod->init_size);
63521 - mod->init_text_size = mod->init_size;
63522 - break;
63523 - case 1: /* RO: text and ro-data */
63524 - mod->init_size = debug_align(mod->init_size);
63525 - mod->init_ro_size = mod->init_size;
63526 - break;
63527 - case 3: /* whole init */
63528 - mod->init_size = debug_align(mod->init_size);
63529 - break;
63530 - }
63531 }
63532 }
63533
63534 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
63535
63536 /* Put symbol section at end of init part of module. */
63537 symsect->sh_flags |= SHF_ALLOC;
63538 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63539 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63540 info->index.sym) | INIT_OFFSET_MASK;
63541 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63542
63543 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
63544 }
63545
63546 /* Append room for core symbols at end of core part. */
63547 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63548 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63549 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63550 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63551
63552 /* Put string table section at end of init part of module. */
63553 strsect->sh_flags |= SHF_ALLOC;
63554 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63555 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63556 info->index.str) | INIT_OFFSET_MASK;
63557 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63558
63559 /* Append room for core symbols' strings at end of core part. */
63560 - info->stroffs = mod->core_size;
63561 + info->stroffs = mod->core_size_rx;
63562 __set_bit(0, info->strmap);
63563 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63564 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63565 }
63566
63567 static void add_kallsyms(struct module *mod, const struct load_info *info)
63568 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
63569 /* Make sure we get permanent strtab: don't use info->strtab. */
63570 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63571
63572 + pax_open_kernel();
63573 +
63574 /* Set types up while we still have access to sections. */
63575 for (i = 0; i < mod->num_symtab; i++)
63576 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63577
63578 - mod->core_symtab = dst = mod->module_core + info->symoffs;
63579 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63580 src = mod->symtab;
63581 *dst = *src;
63582 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63583 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
63584 }
63585 mod->core_num_syms = ndst;
63586
63587 - mod->core_strtab = s = mod->module_core + info->stroffs;
63588 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63589 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63590 if (test_bit(i, info->strmap))
63591 *++s = mod->strtab[i];
63592 +
63593 + pax_close_kernel();
63594 }
63595 #else
63596 static inline void layout_symtab(struct module *mod, struct load_info *info)
63597 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
63598 ddebug_remove_module(debug->modname);
63599 }
63600
63601 -static void *module_alloc_update_bounds(unsigned long size)
63602 +static void *module_alloc_update_bounds_rw(unsigned long size)
63603 {
63604 void *ret = module_alloc(size);
63605
63606 if (ret) {
63607 mutex_lock(&module_mutex);
63608 /* Update module bounds. */
63609 - if ((unsigned long)ret < module_addr_min)
63610 - module_addr_min = (unsigned long)ret;
63611 - if ((unsigned long)ret + size > module_addr_max)
63612 - module_addr_max = (unsigned long)ret + size;
63613 + if ((unsigned long)ret < module_addr_min_rw)
63614 + module_addr_min_rw = (unsigned long)ret;
63615 + if ((unsigned long)ret + size > module_addr_max_rw)
63616 + module_addr_max_rw = (unsigned long)ret + size;
63617 + mutex_unlock(&module_mutex);
63618 + }
63619 + return ret;
63620 +}
63621 +
63622 +static void *module_alloc_update_bounds_rx(unsigned long size)
63623 +{
63624 + void *ret = module_alloc_exec(size);
63625 +
63626 + if (ret) {
63627 + mutex_lock(&module_mutex);
63628 + /* Update module bounds. */
63629 + if ((unsigned long)ret < module_addr_min_rx)
63630 + module_addr_min_rx = (unsigned long)ret;
63631 + if ((unsigned long)ret + size > module_addr_max_rx)
63632 + module_addr_max_rx = (unsigned long)ret + size;
63633 mutex_unlock(&module_mutex);
63634 }
63635 return ret;
63636 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
63637 void *ptr;
63638
63639 /* Do the allocs. */
63640 - ptr = module_alloc_update_bounds(mod->core_size);
63641 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63642 /*
63643 * The pointer to this block is stored in the module structure
63644 * which is inside the block. Just mark it as not being a
63645 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
63646 if (!ptr)
63647 return -ENOMEM;
63648
63649 - memset(ptr, 0, mod->core_size);
63650 - mod->module_core = ptr;
63651 + memset(ptr, 0, mod->core_size_rw);
63652 + mod->module_core_rw = ptr;
63653
63654 - ptr = module_alloc_update_bounds(mod->init_size);
63655 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63656 /*
63657 * The pointer to this block is stored in the module structure
63658 * which is inside the block. This block doesn't need to be
63659 * scanned as it contains data and code that will be freed
63660 * after the module is initialized.
63661 */
63662 - kmemleak_ignore(ptr);
63663 - if (!ptr && mod->init_size) {
63664 - module_free(mod, mod->module_core);
63665 + kmemleak_not_leak(ptr);
63666 + if (!ptr && mod->init_size_rw) {
63667 + module_free(mod, mod->module_core_rw);
63668 return -ENOMEM;
63669 }
63670 - memset(ptr, 0, mod->init_size);
63671 - mod->module_init = ptr;
63672 + memset(ptr, 0, mod->init_size_rw);
63673 + mod->module_init_rw = ptr;
63674 +
63675 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63676 + kmemleak_not_leak(ptr);
63677 + if (!ptr) {
63678 + module_free(mod, mod->module_init_rw);
63679 + module_free(mod, mod->module_core_rw);
63680 + return -ENOMEM;
63681 + }
63682 +
63683 + pax_open_kernel();
63684 + memset(ptr, 0, mod->core_size_rx);
63685 + pax_close_kernel();
63686 + mod->module_core_rx = ptr;
63687 +
63688 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63689 + kmemleak_not_leak(ptr);
63690 + if (!ptr && mod->init_size_rx) {
63691 + module_free_exec(mod, mod->module_core_rx);
63692 + module_free(mod, mod->module_init_rw);
63693 + module_free(mod, mod->module_core_rw);
63694 + return -ENOMEM;
63695 + }
63696 +
63697 + pax_open_kernel();
63698 + memset(ptr, 0, mod->init_size_rx);
63699 + pax_close_kernel();
63700 + mod->module_init_rx = ptr;
63701
63702 /* Transfer each section which specifies SHF_ALLOC */
63703 DEBUGP("final section addresses:\n");
63704 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
63705 if (!(shdr->sh_flags & SHF_ALLOC))
63706 continue;
63707
63708 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
63709 - dest = mod->module_init
63710 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63711 - else
63712 - dest = mod->module_core + shdr->sh_entsize;
63713 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
63714 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63715 + dest = mod->module_init_rw
63716 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63717 + else
63718 + dest = mod->module_init_rx
63719 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63720 + } else {
63721 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63722 + dest = mod->module_core_rw + shdr->sh_entsize;
63723 + else
63724 + dest = mod->module_core_rx + shdr->sh_entsize;
63725 + }
63726 +
63727 + if (shdr->sh_type != SHT_NOBITS) {
63728 +
63729 +#ifdef CONFIG_PAX_KERNEXEC
63730 +#ifdef CONFIG_X86_64
63731 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
63732 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63733 +#endif
63734 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
63735 + pax_open_kernel();
63736 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63737 + pax_close_kernel();
63738 + } else
63739 +#endif
63740
63741 - if (shdr->sh_type != SHT_NOBITS)
63742 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63743 + }
63744 /* Update sh_addr to point to copy in image. */
63745 - shdr->sh_addr = (unsigned long)dest;
63746 +
63747 +#ifdef CONFIG_PAX_KERNEXEC
63748 + if (shdr->sh_flags & SHF_EXECINSTR)
63749 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
63750 + else
63751 +#endif
63752 +
63753 + shdr->sh_addr = (unsigned long)dest;
63754 DEBUGP("\t0x%lx %s\n",
63755 shdr->sh_addr, info->secstrings + shdr->sh_name);
63756 }
63757 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
63758 * Do it before processing of module parameters, so the module
63759 * can provide parameter accessor functions of its own.
63760 */
63761 - if (mod->module_init)
63762 - flush_icache_range((unsigned long)mod->module_init,
63763 - (unsigned long)mod->module_init
63764 - + mod->init_size);
63765 - flush_icache_range((unsigned long)mod->module_core,
63766 - (unsigned long)mod->module_core + mod->core_size);
63767 + if (mod->module_init_rx)
63768 + flush_icache_range((unsigned long)mod->module_init_rx,
63769 + (unsigned long)mod->module_init_rx
63770 + + mod->init_size_rx);
63771 + flush_icache_range((unsigned long)mod->module_core_rx,
63772 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
63773
63774 set_fs(old_fs);
63775 }
63776 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
63777 {
63778 kfree(info->strmap);
63779 percpu_modfree(mod);
63780 - module_free(mod, mod->module_init);
63781 - module_free(mod, mod->module_core);
63782 + module_free_exec(mod, mod->module_init_rx);
63783 + module_free_exec(mod, mod->module_core_rx);
63784 + module_free(mod, mod->module_init_rw);
63785 + module_free(mod, mod->module_core_rw);
63786 }
63787
63788 static int post_relocation(struct module *mod, const struct load_info *info)
63789 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
63790 if (err)
63791 goto free_unload;
63792
63793 + /* Now copy in args */
63794 + mod->args = strndup_user(uargs, ~0UL >> 1);
63795 + if (IS_ERR(mod->args)) {
63796 + err = PTR_ERR(mod->args);
63797 + goto free_unload;
63798 + }
63799 +
63800 /* Set up MODINFO_ATTR fields */
63801 setup_modinfo(mod, &info);
63802
63803 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63804 + {
63805 + char *p, *p2;
63806 +
63807 + if (strstr(mod->args, "grsec_modharden_netdev")) {
63808 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
63809 + err = -EPERM;
63810 + goto free_modinfo;
63811 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63812 + p += strlen("grsec_modharden_normal");
63813 + p2 = strstr(p, "_");
63814 + if (p2) {
63815 + *p2 = '\0';
63816 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63817 + *p2 = '_';
63818 + }
63819 + err = -EPERM;
63820 + goto free_modinfo;
63821 + }
63822 + }
63823 +#endif
63824 +
63825 /* Fix up syms, so that st_value is a pointer to location. */
63826 err = simplify_symbols(mod, &info);
63827 if (err < 0)
63828 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
63829
63830 flush_module_icache(mod);
63831
63832 - /* Now copy in args */
63833 - mod->args = strndup_user(uargs, ~0UL >> 1);
63834 - if (IS_ERR(mod->args)) {
63835 - err = PTR_ERR(mod->args);
63836 - goto free_arch_cleanup;
63837 - }
63838 -
63839 /* Mark state as coming so strong_try_module_get() ignores us. */
63840 mod->state = MODULE_STATE_COMING;
63841
63842 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
63843 unlock:
63844 mutex_unlock(&module_mutex);
63845 synchronize_sched();
63846 - kfree(mod->args);
63847 - free_arch_cleanup:
63848 module_arch_cleanup(mod);
63849 free_modinfo:
63850 free_modinfo(mod);
63851 + kfree(mod->args);
63852 free_unload:
63853 module_unload_free(mod);
63854 free_module:
63855 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
63856 MODULE_STATE_COMING, mod);
63857
63858 /* Set RO and NX regions for core */
63859 - set_section_ro_nx(mod->module_core,
63860 - mod->core_text_size,
63861 - mod->core_ro_size,
63862 - mod->core_size);
63863 + set_section_ro_nx(mod->module_core_rx,
63864 + mod->core_size_rx,
63865 + mod->core_size_rx,
63866 + mod->core_size_rx);
63867
63868 /* Set RO and NX regions for init */
63869 - set_section_ro_nx(mod->module_init,
63870 - mod->init_text_size,
63871 - mod->init_ro_size,
63872 - mod->init_size);
63873 + set_section_ro_nx(mod->module_init_rx,
63874 + mod->init_size_rx,
63875 + mod->init_size_rx,
63876 + mod->init_size_rx);
63877
63878 do_mod_ctors(mod);
63879 /* Start the module */
63880 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
63881 mod->strtab = mod->core_strtab;
63882 #endif
63883 unset_module_init_ro_nx(mod);
63884 - module_free(mod, mod->module_init);
63885 - mod->module_init = NULL;
63886 - mod->init_size = 0;
63887 - mod->init_ro_size = 0;
63888 - mod->init_text_size = 0;
63889 + module_free(mod, mod->module_init_rw);
63890 + module_free_exec(mod, mod->module_init_rx);
63891 + mod->module_init_rw = NULL;
63892 + mod->module_init_rx = NULL;
63893 + mod->init_size_rw = 0;
63894 + mod->init_size_rx = 0;
63895 mutex_unlock(&module_mutex);
63896
63897 return 0;
63898 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
63899 unsigned long nextval;
63900
63901 /* At worse, next value is at end of module */
63902 - if (within_module_init(addr, mod))
63903 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
63904 + if (within_module_init_rx(addr, mod))
63905 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63906 + else if (within_module_init_rw(addr, mod))
63907 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63908 + else if (within_module_core_rx(addr, mod))
63909 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63910 + else if (within_module_core_rw(addr, mod))
63911 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63912 else
63913 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
63914 + return NULL;
63915
63916 /* Scan for closest preceding symbol, and next symbol. (ELF
63917 starts real symbols at 1). */
63918 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
63919 char buf[8];
63920
63921 seq_printf(m, "%s %u",
63922 - mod->name, mod->init_size + mod->core_size);
63923 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63924 print_unload_info(m, mod);
63925
63926 /* Informative for users. */
63927 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
63928 mod->state == MODULE_STATE_COMING ? "Loading":
63929 "Live");
63930 /* Used by oprofile and other similar tools. */
63931 - seq_printf(m, " 0x%pK", mod->module_core);
63932 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
63933
63934 /* Taints info */
63935 if (mod->taints)
63936 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
63937
63938 static int __init proc_modules_init(void)
63939 {
63940 +#ifndef CONFIG_GRKERNSEC_HIDESYM
63941 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63942 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63943 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63944 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63945 +#else
63946 proc_create("modules", 0, NULL, &proc_modules_operations);
63947 +#endif
63948 +#else
63949 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63950 +#endif
63951 return 0;
63952 }
63953 module_init(proc_modules_init);
63954 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
63955 {
63956 struct module *mod;
63957
63958 - if (addr < module_addr_min || addr > module_addr_max)
63959 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63960 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
63961 return NULL;
63962
63963 list_for_each_entry_rcu(mod, &modules, list)
63964 - if (within_module_core(addr, mod)
63965 - || within_module_init(addr, mod))
63966 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
63967 return mod;
63968 return NULL;
63969 }
63970 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
63971 */
63972 struct module *__module_text_address(unsigned long addr)
63973 {
63974 - struct module *mod = __module_address(addr);
63975 + struct module *mod;
63976 +
63977 +#ifdef CONFIG_X86_32
63978 + addr = ktla_ktva(addr);
63979 +#endif
63980 +
63981 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
63982 + return NULL;
63983 +
63984 + mod = __module_address(addr);
63985 +
63986 if (mod) {
63987 /* Make sure it's within the text section. */
63988 - if (!within(addr, mod->module_init, mod->init_text_size)
63989 - && !within(addr, mod->module_core, mod->core_text_size))
63990 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
63991 mod = NULL;
63992 }
63993 return mod;
63994 diff -urNp linux-3.0.7/kernel/mutex.c linux-3.0.7/kernel/mutex.c
63995 --- linux-3.0.7/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
63996 +++ linux-3.0.7/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
63997 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
63998 spin_lock_mutex(&lock->wait_lock, flags);
63999
64000 debug_mutex_lock_common(lock, &waiter);
64001 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64002 + debug_mutex_add_waiter(lock, &waiter, task);
64003
64004 /* add waiting tasks to the end of the waitqueue (FIFO): */
64005 list_add_tail(&waiter.list, &lock->wait_list);
64006 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64007 * TASK_UNINTERRUPTIBLE case.)
64008 */
64009 if (unlikely(signal_pending_state(state, task))) {
64010 - mutex_remove_waiter(lock, &waiter,
64011 - task_thread_info(task));
64012 + mutex_remove_waiter(lock, &waiter, task);
64013 mutex_release(&lock->dep_map, 1, ip);
64014 spin_unlock_mutex(&lock->wait_lock, flags);
64015
64016 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64017 done:
64018 lock_acquired(&lock->dep_map, ip);
64019 /* got the lock - rejoice! */
64020 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64021 + mutex_remove_waiter(lock, &waiter, task);
64022 mutex_set_owner(lock);
64023
64024 /* set it to 0 if there are no waiters left: */
64025 diff -urNp linux-3.0.7/kernel/mutex-debug.c linux-3.0.7/kernel/mutex-debug.c
64026 --- linux-3.0.7/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
64027 +++ linux-3.0.7/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
64028 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64029 }
64030
64031 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64032 - struct thread_info *ti)
64033 + struct task_struct *task)
64034 {
64035 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64036
64037 /* Mark the current thread as blocked on the lock: */
64038 - ti->task->blocked_on = waiter;
64039 + task->blocked_on = waiter;
64040 }
64041
64042 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64043 - struct thread_info *ti)
64044 + struct task_struct *task)
64045 {
64046 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64047 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64048 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64049 - ti->task->blocked_on = NULL;
64050 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64051 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64052 + task->blocked_on = NULL;
64053
64054 list_del_init(&waiter->list);
64055 waiter->task = NULL;
64056 diff -urNp linux-3.0.7/kernel/mutex-debug.h linux-3.0.7/kernel/mutex-debug.h
64057 --- linux-3.0.7/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
64058 +++ linux-3.0.7/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
64059 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64060 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64061 extern void debug_mutex_add_waiter(struct mutex *lock,
64062 struct mutex_waiter *waiter,
64063 - struct thread_info *ti);
64064 + struct task_struct *task);
64065 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64066 - struct thread_info *ti);
64067 + struct task_struct *task);
64068 extern void debug_mutex_unlock(struct mutex *lock);
64069 extern void debug_mutex_init(struct mutex *lock, const char *name,
64070 struct lock_class_key *key);
64071 diff -urNp linux-3.0.7/kernel/padata.c linux-3.0.7/kernel/padata.c
64072 --- linux-3.0.7/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
64073 +++ linux-3.0.7/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
64074 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64075 padata->pd = pd;
64076 padata->cb_cpu = cb_cpu;
64077
64078 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64079 - atomic_set(&pd->seq_nr, -1);
64080 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64081 + atomic_set_unchecked(&pd->seq_nr, -1);
64082
64083 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64084 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64085
64086 target_cpu = padata_cpu_hash(padata);
64087 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64088 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64089 padata_init_pqueues(pd);
64090 padata_init_squeues(pd);
64091 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64092 - atomic_set(&pd->seq_nr, -1);
64093 + atomic_set_unchecked(&pd->seq_nr, -1);
64094 atomic_set(&pd->reorder_objects, 0);
64095 atomic_set(&pd->refcnt, 0);
64096 pd->pinst = pinst;
64097 diff -urNp linux-3.0.7/kernel/panic.c linux-3.0.7/kernel/panic.c
64098 --- linux-3.0.7/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
64099 +++ linux-3.0.7/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
64100 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
64101 const char *board;
64102
64103 printk(KERN_WARNING "------------[ cut here ]------------\n");
64104 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64105 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64106 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64107 if (board)
64108 printk(KERN_WARNING "Hardware name: %s\n", board);
64109 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64110 */
64111 void __stack_chk_fail(void)
64112 {
64113 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64114 + dump_stack();
64115 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64116 __builtin_return_address(0));
64117 }
64118 EXPORT_SYMBOL(__stack_chk_fail);
64119 diff -urNp linux-3.0.7/kernel/pid.c linux-3.0.7/kernel/pid.c
64120 --- linux-3.0.7/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
64121 +++ linux-3.0.7/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
64122 @@ -33,6 +33,7 @@
64123 #include <linux/rculist.h>
64124 #include <linux/bootmem.h>
64125 #include <linux/hash.h>
64126 +#include <linux/security.h>
64127 #include <linux/pid_namespace.h>
64128 #include <linux/init_task.h>
64129 #include <linux/syscalls.h>
64130 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64131
64132 int pid_max = PID_MAX_DEFAULT;
64133
64134 -#define RESERVED_PIDS 300
64135 +#define RESERVED_PIDS 500
64136
64137 int pid_max_min = RESERVED_PIDS + 1;
64138 int pid_max_max = PID_MAX_LIMIT;
64139 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
64140 */
64141 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64142 {
64143 + struct task_struct *task;
64144 +
64145 rcu_lockdep_assert(rcu_read_lock_held());
64146 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64147 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64148 +
64149 + if (gr_pid_is_chrooted(task))
64150 + return NULL;
64151 +
64152 + return task;
64153 }
64154
64155 struct task_struct *find_task_by_vpid(pid_t vnr)
64156 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
64157 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64158 }
64159
64160 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64161 +{
64162 + rcu_lockdep_assert(rcu_read_lock_held());
64163 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64164 +}
64165 +
64166 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64167 {
64168 struct pid *pid;
64169 diff -urNp linux-3.0.7/kernel/posix-cpu-timers.c linux-3.0.7/kernel/posix-cpu-timers.c
64170 --- linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:09.000000000 -0400
64171 +++ linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:19.000000000 -0400
64172 @@ -6,6 +6,7 @@
64173 #include <linux/posix-timers.h>
64174 #include <linux/errno.h>
64175 #include <linux/math64.h>
64176 +#include <linux/security.h>
64177 #include <asm/uaccess.h>
64178 #include <linux/kernel_stat.h>
64179 #include <trace/events/timer.h>
64180 @@ -1605,14 +1606,14 @@ struct k_clock clock_posix_cpu = {
64181
64182 static __init int init_posix_cpu_timers(void)
64183 {
64184 - struct k_clock process = {
64185 + static struct k_clock process = {
64186 .clock_getres = process_cpu_clock_getres,
64187 .clock_get = process_cpu_clock_get,
64188 .timer_create = process_cpu_timer_create,
64189 .nsleep = process_cpu_nsleep,
64190 .nsleep_restart = process_cpu_nsleep_restart,
64191 };
64192 - struct k_clock thread = {
64193 + static struct k_clock thread = {
64194 .clock_getres = thread_cpu_clock_getres,
64195 .clock_get = thread_cpu_clock_get,
64196 .timer_create = thread_cpu_timer_create,
64197 diff -urNp linux-3.0.7/kernel/posix-timers.c linux-3.0.7/kernel/posix-timers.c
64198 --- linux-3.0.7/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
64199 +++ linux-3.0.7/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
64200 @@ -43,6 +43,7 @@
64201 #include <linux/idr.h>
64202 #include <linux/posix-clock.h>
64203 #include <linux/posix-timers.h>
64204 +#include <linux/grsecurity.h>
64205 #include <linux/syscalls.h>
64206 #include <linux/wait.h>
64207 #include <linux/workqueue.h>
64208 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64209 * which we beg off on and pass to do_sys_settimeofday().
64210 */
64211
64212 -static struct k_clock posix_clocks[MAX_CLOCKS];
64213 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64214
64215 /*
64216 * These ones are defined below.
64217 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64218 */
64219 static __init int init_posix_timers(void)
64220 {
64221 - struct k_clock clock_realtime = {
64222 + static struct k_clock clock_realtime = {
64223 .clock_getres = hrtimer_get_res,
64224 .clock_get = posix_clock_realtime_get,
64225 .clock_set = posix_clock_realtime_set,
64226 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64227 .timer_get = common_timer_get,
64228 .timer_del = common_timer_del,
64229 };
64230 - struct k_clock clock_monotonic = {
64231 + static struct k_clock clock_monotonic = {
64232 .clock_getres = hrtimer_get_res,
64233 .clock_get = posix_ktime_get_ts,
64234 .nsleep = common_nsleep,
64235 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64236 .timer_get = common_timer_get,
64237 .timer_del = common_timer_del,
64238 };
64239 - struct k_clock clock_monotonic_raw = {
64240 + static struct k_clock clock_monotonic_raw = {
64241 .clock_getres = hrtimer_get_res,
64242 .clock_get = posix_get_monotonic_raw,
64243 };
64244 - struct k_clock clock_realtime_coarse = {
64245 + static struct k_clock clock_realtime_coarse = {
64246 .clock_getres = posix_get_coarse_res,
64247 .clock_get = posix_get_realtime_coarse,
64248 };
64249 - struct k_clock clock_monotonic_coarse = {
64250 + static struct k_clock clock_monotonic_coarse = {
64251 .clock_getres = posix_get_coarse_res,
64252 .clock_get = posix_get_monotonic_coarse,
64253 };
64254 - struct k_clock clock_boottime = {
64255 + static struct k_clock clock_boottime = {
64256 .clock_getres = hrtimer_get_res,
64257 .clock_get = posix_get_boottime,
64258 .nsleep = common_nsleep,
64259 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64260 .timer_del = common_timer_del,
64261 };
64262
64263 + pax_track_stack();
64264 +
64265 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64266 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64267 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64268 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64269 return;
64270 }
64271
64272 - posix_clocks[clock_id] = *new_clock;
64273 + posix_clocks[clock_id] = new_clock;
64274 }
64275 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64276
64277 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64278 return (id & CLOCKFD_MASK) == CLOCKFD ?
64279 &clock_posix_dynamic : &clock_posix_cpu;
64280
64281 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64282 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64283 return NULL;
64284 - return &posix_clocks[id];
64285 + return posix_clocks[id];
64286 }
64287
64288 static int common_timer_create(struct k_itimer *new_timer)
64289 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64290 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64291 return -EFAULT;
64292
64293 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
64294 + have their clock_set fptr set to a nosettime dummy function.
64295 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64296 + call common_clock_set, which calls do_sys_settimeofday, which
64297 + we hook.
64298 + */
64299 +
64300 return kc->clock_set(which_clock, &new_tp);
64301 }
64302
64303 diff -urNp linux-3.0.7/kernel/power/poweroff.c linux-3.0.7/kernel/power/poweroff.c
64304 --- linux-3.0.7/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
64305 +++ linux-3.0.7/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
64306 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64307 .enable_mask = SYSRQ_ENABLE_BOOT,
64308 };
64309
64310 -static int pm_sysrq_init(void)
64311 +static int __init pm_sysrq_init(void)
64312 {
64313 register_sysrq_key('o', &sysrq_poweroff_op);
64314 return 0;
64315 diff -urNp linux-3.0.7/kernel/power/process.c linux-3.0.7/kernel/power/process.c
64316 --- linux-3.0.7/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
64317 +++ linux-3.0.7/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
64318 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64319 u64 elapsed_csecs64;
64320 unsigned int elapsed_csecs;
64321 bool wakeup = false;
64322 + bool timedout = false;
64323
64324 do_gettimeofday(&start);
64325
64326 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64327
64328 while (true) {
64329 todo = 0;
64330 + if (time_after(jiffies, end_time))
64331 + timedout = true;
64332 read_lock(&tasklist_lock);
64333 do_each_thread(g, p) {
64334 if (frozen(p) || !freezable(p))
64335 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64336 * try_to_stop() after schedule() in ptrace/signal
64337 * stop sees TIF_FREEZE.
64338 */
64339 - if (!task_is_stopped_or_traced(p) &&
64340 - !freezer_should_skip(p))
64341 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64342 todo++;
64343 + if (timedout) {
64344 + printk(KERN_ERR "Task refusing to freeze:\n");
64345 + sched_show_task(p);
64346 + }
64347 + }
64348 } while_each_thread(g, p);
64349 read_unlock(&tasklist_lock);
64350
64351 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64352 todo += wq_busy;
64353 }
64354
64355 - if (!todo || time_after(jiffies, end_time))
64356 + if (!todo || timedout)
64357 break;
64358
64359 if (pm_wakeup_pending()) {
64360 diff -urNp linux-3.0.7/kernel/printk.c linux-3.0.7/kernel/printk.c
64361 --- linux-3.0.7/kernel/printk.c 2011-10-16 21:54:54.000000000 -0400
64362 +++ linux-3.0.7/kernel/printk.c 2011-10-16 21:55:28.000000000 -0400
64363 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
64364 if (from_file && type != SYSLOG_ACTION_OPEN)
64365 return 0;
64366
64367 +#ifdef CONFIG_GRKERNSEC_DMESG
64368 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64369 + return -EPERM;
64370 +#endif
64371 +
64372 if (syslog_action_restricted(type)) {
64373 if (capable(CAP_SYSLOG))
64374 return 0;
64375 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
64376 if (capable(CAP_SYS_ADMIN)) {
64377 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
64378 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
64379 "but no CAP_SYSLOG (deprecated).\n");
64380 return 0;
64381 }
64382 diff -urNp linux-3.0.7/kernel/profile.c linux-3.0.7/kernel/profile.c
64383 --- linux-3.0.7/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
64384 +++ linux-3.0.7/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
64385 @@ -39,7 +39,7 @@ struct profile_hit {
64386 /* Oprofile timer tick hook */
64387 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64388
64389 -static atomic_t *prof_buffer;
64390 +static atomic_unchecked_t *prof_buffer;
64391 static unsigned long prof_len, prof_shift;
64392
64393 int prof_on __read_mostly;
64394 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64395 hits[i].pc = 0;
64396 continue;
64397 }
64398 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64399 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64400 hits[i].hits = hits[i].pc = 0;
64401 }
64402 }
64403 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64404 * Add the current hit(s) and flush the write-queue out
64405 * to the global buffer:
64406 */
64407 - atomic_add(nr_hits, &prof_buffer[pc]);
64408 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64409 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64410 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64411 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64412 hits[i].pc = hits[i].hits = 0;
64413 }
64414 out:
64415 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64416 {
64417 unsigned long pc;
64418 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64419 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64420 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64421 }
64422 #endif /* !CONFIG_SMP */
64423
64424 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64425 return -EFAULT;
64426 buf++; p++; count--; read++;
64427 }
64428 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64429 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64430 if (copy_to_user(buf, (void *)pnt, count))
64431 return -EFAULT;
64432 read += count;
64433 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64434 }
64435 #endif
64436 profile_discard_flip_buffers();
64437 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64438 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64439 return count;
64440 }
64441
64442 diff -urNp linux-3.0.7/kernel/ptrace.c linux-3.0.7/kernel/ptrace.c
64443 --- linux-3.0.7/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
64444 +++ linux-3.0.7/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
64445 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
64446 return ret;
64447 }
64448
64449 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64450 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64451 + unsigned int log)
64452 {
64453 const struct cred *cred = current_cred(), *tcred;
64454
64455 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
64456 cred->gid == tcred->sgid &&
64457 cred->gid == tcred->gid))
64458 goto ok;
64459 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64460 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64461 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64462 goto ok;
64463 rcu_read_unlock();
64464 return -EPERM;
64465 @@ -167,7 +169,9 @@ ok:
64466 smp_rmb();
64467 if (task->mm)
64468 dumpable = get_dumpable(task->mm);
64469 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64470 + if (!dumpable &&
64471 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64472 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64473 return -EPERM;
64474
64475 return security_ptrace_access_check(task, mode);
64476 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
64477 {
64478 int err;
64479 task_lock(task);
64480 - err = __ptrace_may_access(task, mode);
64481 + err = __ptrace_may_access(task, mode, 0);
64482 + task_unlock(task);
64483 + return !err;
64484 +}
64485 +
64486 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64487 +{
64488 + int err;
64489 + task_lock(task);
64490 + err = __ptrace_may_access(task, mode, 1);
64491 task_unlock(task);
64492 return !err;
64493 }
64494 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
64495 goto out;
64496
64497 task_lock(task);
64498 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64499 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64500 task_unlock(task);
64501 if (retval)
64502 goto unlock_creds;
64503 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
64504 goto unlock_tasklist;
64505
64506 task->ptrace = PT_PTRACED;
64507 - if (task_ns_capable(task, CAP_SYS_PTRACE))
64508 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64509 task->ptrace |= PT_PTRACE_CAP;
64510
64511 __ptrace_link(task, current);
64512 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
64513 {
64514 int copied = 0;
64515
64516 + pax_track_stack();
64517 +
64518 while (len > 0) {
64519 char buf[128];
64520 int this_len, retval;
64521 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
64522 break;
64523 return -EIO;
64524 }
64525 - if (copy_to_user(dst, buf, retval))
64526 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64527 return -EFAULT;
64528 copied += retval;
64529 src += retval;
64530 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
64531 {
64532 int copied = 0;
64533
64534 + pax_track_stack();
64535 +
64536 while (len > 0) {
64537 char buf[128];
64538 int this_len, retval;
64539 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
64540 {
64541 int ret = -EIO;
64542 siginfo_t siginfo;
64543 - void __user *datavp = (void __user *) data;
64544 + void __user *datavp = (__force void __user *) data;
64545 unsigned long __user *datalp = datavp;
64546
64547 + pax_track_stack();
64548 +
64549 switch (request) {
64550 case PTRACE_PEEKTEXT:
64551 case PTRACE_PEEKDATA:
64552 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64553 goto out;
64554 }
64555
64556 + if (gr_handle_ptrace(child, request)) {
64557 + ret = -EPERM;
64558 + goto out_put_task_struct;
64559 + }
64560 +
64561 if (request == PTRACE_ATTACH) {
64562 ret = ptrace_attach(child);
64563 /*
64564 * Some architectures need to do book-keeping after
64565 * a ptrace attach.
64566 */
64567 - if (!ret)
64568 + if (!ret) {
64569 arch_ptrace_attach(child);
64570 + gr_audit_ptrace(child);
64571 + }
64572 goto out_put_task_struct;
64573 }
64574
64575 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
64576 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64577 if (copied != sizeof(tmp))
64578 return -EIO;
64579 - return put_user(tmp, (unsigned long __user *)data);
64580 + return put_user(tmp, (__force unsigned long __user *)data);
64581 }
64582
64583 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
64584 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
64585 siginfo_t siginfo;
64586 int ret;
64587
64588 + pax_track_stack();
64589 +
64590 switch (request) {
64591 case PTRACE_PEEKTEXT:
64592 case PTRACE_PEEKDATA:
64593 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
64594 goto out;
64595 }
64596
64597 + if (gr_handle_ptrace(child, request)) {
64598 + ret = -EPERM;
64599 + goto out_put_task_struct;
64600 + }
64601 +
64602 if (request == PTRACE_ATTACH) {
64603 ret = ptrace_attach(child);
64604 /*
64605 * Some architectures need to do book-keeping after
64606 * a ptrace attach.
64607 */
64608 - if (!ret)
64609 + if (!ret) {
64610 arch_ptrace_attach(child);
64611 + gr_audit_ptrace(child);
64612 + }
64613 goto out_put_task_struct;
64614 }
64615
64616 diff -urNp linux-3.0.7/kernel/rcutorture.c linux-3.0.7/kernel/rcutorture.c
64617 --- linux-3.0.7/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
64618 +++ linux-3.0.7/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
64619 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64620 { 0 };
64621 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64622 { 0 };
64623 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64624 -static atomic_t n_rcu_torture_alloc;
64625 -static atomic_t n_rcu_torture_alloc_fail;
64626 -static atomic_t n_rcu_torture_free;
64627 -static atomic_t n_rcu_torture_mberror;
64628 -static atomic_t n_rcu_torture_error;
64629 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64630 +static atomic_unchecked_t n_rcu_torture_alloc;
64631 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
64632 +static atomic_unchecked_t n_rcu_torture_free;
64633 +static atomic_unchecked_t n_rcu_torture_mberror;
64634 +static atomic_unchecked_t n_rcu_torture_error;
64635 static long n_rcu_torture_boost_ktrerror;
64636 static long n_rcu_torture_boost_rterror;
64637 static long n_rcu_torture_boost_failure;
64638 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
64639
64640 spin_lock_bh(&rcu_torture_lock);
64641 if (list_empty(&rcu_torture_freelist)) {
64642 - atomic_inc(&n_rcu_torture_alloc_fail);
64643 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64644 spin_unlock_bh(&rcu_torture_lock);
64645 return NULL;
64646 }
64647 - atomic_inc(&n_rcu_torture_alloc);
64648 + atomic_inc_unchecked(&n_rcu_torture_alloc);
64649 p = rcu_torture_freelist.next;
64650 list_del_init(p);
64651 spin_unlock_bh(&rcu_torture_lock);
64652 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
64653 static void
64654 rcu_torture_free(struct rcu_torture *p)
64655 {
64656 - atomic_inc(&n_rcu_torture_free);
64657 + atomic_inc_unchecked(&n_rcu_torture_free);
64658 spin_lock_bh(&rcu_torture_lock);
64659 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64660 spin_unlock_bh(&rcu_torture_lock);
64661 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
64662 i = rp->rtort_pipe_count;
64663 if (i > RCU_TORTURE_PIPE_LEN)
64664 i = RCU_TORTURE_PIPE_LEN;
64665 - atomic_inc(&rcu_torture_wcount[i]);
64666 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64667 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64668 rp->rtort_mbtest = 0;
64669 rcu_torture_free(rp);
64670 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
64671 i = rp->rtort_pipe_count;
64672 if (i > RCU_TORTURE_PIPE_LEN)
64673 i = RCU_TORTURE_PIPE_LEN;
64674 - atomic_inc(&rcu_torture_wcount[i]);
64675 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64676 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64677 rp->rtort_mbtest = 0;
64678 list_del(&rp->rtort_free);
64679 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
64680 i = old_rp->rtort_pipe_count;
64681 if (i > RCU_TORTURE_PIPE_LEN)
64682 i = RCU_TORTURE_PIPE_LEN;
64683 - atomic_inc(&rcu_torture_wcount[i]);
64684 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64685 old_rp->rtort_pipe_count++;
64686 cur_ops->deferred_free(old_rp);
64687 }
64688 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
64689 return;
64690 }
64691 if (p->rtort_mbtest == 0)
64692 - atomic_inc(&n_rcu_torture_mberror);
64693 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64694 spin_lock(&rand_lock);
64695 cur_ops->read_delay(&rand);
64696 n_rcu_torture_timers++;
64697 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
64698 continue;
64699 }
64700 if (p->rtort_mbtest == 0)
64701 - atomic_inc(&n_rcu_torture_mberror);
64702 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64703 cur_ops->read_delay(&rand);
64704 preempt_disable();
64705 pipe_count = p->rtort_pipe_count;
64706 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
64707 rcu_torture_current,
64708 rcu_torture_current_version,
64709 list_empty(&rcu_torture_freelist),
64710 - atomic_read(&n_rcu_torture_alloc),
64711 - atomic_read(&n_rcu_torture_alloc_fail),
64712 - atomic_read(&n_rcu_torture_free),
64713 - atomic_read(&n_rcu_torture_mberror),
64714 + atomic_read_unchecked(&n_rcu_torture_alloc),
64715 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64716 + atomic_read_unchecked(&n_rcu_torture_free),
64717 + atomic_read_unchecked(&n_rcu_torture_mberror),
64718 n_rcu_torture_boost_ktrerror,
64719 n_rcu_torture_boost_rterror,
64720 n_rcu_torture_boost_failure,
64721 n_rcu_torture_boosts,
64722 n_rcu_torture_timers);
64723 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
64724 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
64725 n_rcu_torture_boost_ktrerror != 0 ||
64726 n_rcu_torture_boost_rterror != 0 ||
64727 n_rcu_torture_boost_failure != 0)
64728 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
64729 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64730 if (i > 1) {
64731 cnt += sprintf(&page[cnt], "!!! ");
64732 - atomic_inc(&n_rcu_torture_error);
64733 + atomic_inc_unchecked(&n_rcu_torture_error);
64734 WARN_ON_ONCE(1);
64735 }
64736 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64737 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
64738 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
64739 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64740 cnt += sprintf(&page[cnt], " %d",
64741 - atomic_read(&rcu_torture_wcount[i]));
64742 + atomic_read_unchecked(&rcu_torture_wcount[i]));
64743 }
64744 cnt += sprintf(&page[cnt], "\n");
64745 if (cur_ops->stats)
64746 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
64747
64748 if (cur_ops->cleanup)
64749 cur_ops->cleanup();
64750 - if (atomic_read(&n_rcu_torture_error))
64751 + if (atomic_read_unchecked(&n_rcu_torture_error))
64752 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
64753 else
64754 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
64755 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
64756
64757 rcu_torture_current = NULL;
64758 rcu_torture_current_version = 0;
64759 - atomic_set(&n_rcu_torture_alloc, 0);
64760 - atomic_set(&n_rcu_torture_alloc_fail, 0);
64761 - atomic_set(&n_rcu_torture_free, 0);
64762 - atomic_set(&n_rcu_torture_mberror, 0);
64763 - atomic_set(&n_rcu_torture_error, 0);
64764 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
64765 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
64766 + atomic_set_unchecked(&n_rcu_torture_free, 0);
64767 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
64768 + atomic_set_unchecked(&n_rcu_torture_error, 0);
64769 n_rcu_torture_boost_ktrerror = 0;
64770 n_rcu_torture_boost_rterror = 0;
64771 n_rcu_torture_boost_failure = 0;
64772 n_rcu_torture_boosts = 0;
64773 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
64774 - atomic_set(&rcu_torture_wcount[i], 0);
64775 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
64776 for_each_possible_cpu(cpu) {
64777 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64778 per_cpu(rcu_torture_count, cpu)[i] = 0;
64779 diff -urNp linux-3.0.7/kernel/rcutree.c linux-3.0.7/kernel/rcutree.c
64780 --- linux-3.0.7/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
64781 +++ linux-3.0.7/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
64782 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
64783 }
64784 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64785 smp_mb__before_atomic_inc(); /* See above. */
64786 - atomic_inc(&rdtp->dynticks);
64787 + atomic_inc_unchecked(&rdtp->dynticks);
64788 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
64789 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64790 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64791 local_irq_restore(flags);
64792
64793 /* If the interrupt queued a callback, get out of dyntick mode. */
64794 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
64795 return;
64796 }
64797 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
64798 - atomic_inc(&rdtp->dynticks);
64799 + atomic_inc_unchecked(&rdtp->dynticks);
64800 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64801 smp_mb__after_atomic_inc(); /* See above. */
64802 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64803 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64804 local_irq_restore(flags);
64805 }
64806
64807 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
64808 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
64809
64810 if (rdtp->dynticks_nmi_nesting == 0 &&
64811 - (atomic_read(&rdtp->dynticks) & 0x1))
64812 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
64813 return;
64814 rdtp->dynticks_nmi_nesting++;
64815 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
64816 - atomic_inc(&rdtp->dynticks);
64817 + atomic_inc_unchecked(&rdtp->dynticks);
64818 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64819 smp_mb__after_atomic_inc(); /* See above. */
64820 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64821 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64822 }
64823
64824 /**
64825 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
64826 return;
64827 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64828 smp_mb__before_atomic_inc(); /* See above. */
64829 - atomic_inc(&rdtp->dynticks);
64830 + atomic_inc_unchecked(&rdtp->dynticks);
64831 smp_mb__after_atomic_inc(); /* Force delay to next write. */
64832 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64833 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64834 }
64835
64836 /**
64837 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
64838 */
64839 static int dyntick_save_progress_counter(struct rcu_data *rdp)
64840 {
64841 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
64842 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64843 return 0;
64844 }
64845
64846 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
64847 unsigned long curr;
64848 unsigned long snap;
64849
64850 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
64851 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64852 snap = (unsigned long)rdp->dynticks_snap;
64853
64854 /*
64855 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
64856 /*
64857 * Do softirq processing for the current CPU.
64858 */
64859 -static void rcu_process_callbacks(struct softirq_action *unused)
64860 +static void rcu_process_callbacks(void)
64861 {
64862 __rcu_process_callbacks(&rcu_sched_state,
64863 &__get_cpu_var(rcu_sched_data));
64864 diff -urNp linux-3.0.7/kernel/rcutree.h linux-3.0.7/kernel/rcutree.h
64865 --- linux-3.0.7/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
64866 +++ linux-3.0.7/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
64867 @@ -86,7 +86,7 @@
64868 struct rcu_dynticks {
64869 int dynticks_nesting; /* Track irq/process nesting level. */
64870 int dynticks_nmi_nesting; /* Track NMI nesting level. */
64871 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
64872 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
64873 };
64874
64875 /* RCU's kthread states for tracing. */
64876 diff -urNp linux-3.0.7/kernel/rcutree_plugin.h linux-3.0.7/kernel/rcutree_plugin.h
64877 --- linux-3.0.7/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
64878 +++ linux-3.0.7/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
64879 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
64880
64881 /* Clean up and exit. */
64882 smp_mb(); /* ensure expedited GP seen before counter increment. */
64883 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
64884 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
64885 unlock_mb_ret:
64886 mutex_unlock(&sync_rcu_preempt_exp_mutex);
64887 mb_ret:
64888 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
64889
64890 #else /* #ifndef CONFIG_SMP */
64891
64892 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
64893 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
64894 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
64895 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
64896
64897 static int synchronize_sched_expedited_cpu_stop(void *data)
64898 {
64899 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
64900 int firstsnap, s, snap, trycount = 0;
64901
64902 /* Note that atomic_inc_return() implies full memory barrier. */
64903 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
64904 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
64905 get_online_cpus();
64906
64907 /*
64908 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
64909 }
64910
64911 /* Check to see if someone else did our work for us. */
64912 - s = atomic_read(&sync_sched_expedited_done);
64913 + s = atomic_read_unchecked(&sync_sched_expedited_done);
64914 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
64915 smp_mb(); /* ensure test happens before caller kfree */
64916 return;
64917 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
64918 * grace period works for us.
64919 */
64920 get_online_cpus();
64921 - snap = atomic_read(&sync_sched_expedited_started) - 1;
64922 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
64923 smp_mb(); /* ensure read is before try_stop_cpus(). */
64924 }
64925
64926 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
64927 * than we did beat us to the punch.
64928 */
64929 do {
64930 - s = atomic_read(&sync_sched_expedited_done);
64931 + s = atomic_read_unchecked(&sync_sched_expedited_done);
64932 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
64933 smp_mb(); /* ensure test happens before caller kfree */
64934 break;
64935 }
64936 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
64937 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
64938
64939 put_online_cpus();
64940 }
64941 diff -urNp linux-3.0.7/kernel/relay.c linux-3.0.7/kernel/relay.c
64942 --- linux-3.0.7/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
64943 +++ linux-3.0.7/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
64944 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
64945 };
64946 ssize_t ret;
64947
64948 + pax_track_stack();
64949 +
64950 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
64951 return 0;
64952 if (splice_grow_spd(pipe, &spd))
64953 diff -urNp linux-3.0.7/kernel/resource.c linux-3.0.7/kernel/resource.c
64954 --- linux-3.0.7/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
64955 +++ linux-3.0.7/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
64956 @@ -141,8 +141,18 @@ static const struct file_operations proc
64957
64958 static int __init ioresources_init(void)
64959 {
64960 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64961 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64962 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
64963 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
64964 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64965 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
64966 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
64967 +#endif
64968 +#else
64969 proc_create("ioports", 0, NULL, &proc_ioports_operations);
64970 proc_create("iomem", 0, NULL, &proc_iomem_operations);
64971 +#endif
64972 return 0;
64973 }
64974 __initcall(ioresources_init);
64975 diff -urNp linux-3.0.7/kernel/rtmutex-tester.c linux-3.0.7/kernel/rtmutex-tester.c
64976 --- linux-3.0.7/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
64977 +++ linux-3.0.7/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
64978 @@ -20,7 +20,7 @@
64979 #define MAX_RT_TEST_MUTEXES 8
64980
64981 static spinlock_t rttest_lock;
64982 -static atomic_t rttest_event;
64983 +static atomic_unchecked_t rttest_event;
64984
64985 struct test_thread_data {
64986 int opcode;
64987 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
64988
64989 case RTTEST_LOCKCONT:
64990 td->mutexes[td->opdata] = 1;
64991 - td->event = atomic_add_return(1, &rttest_event);
64992 + td->event = atomic_add_return_unchecked(1, &rttest_event);
64993 return 0;
64994
64995 case RTTEST_RESET:
64996 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
64997 return 0;
64998
64999 case RTTEST_RESETEVENT:
65000 - atomic_set(&rttest_event, 0);
65001 + atomic_set_unchecked(&rttest_event, 0);
65002 return 0;
65003
65004 default:
65005 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65006 return ret;
65007
65008 td->mutexes[id] = 1;
65009 - td->event = atomic_add_return(1, &rttest_event);
65010 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65011 rt_mutex_lock(&mutexes[id]);
65012 - td->event = atomic_add_return(1, &rttest_event);
65013 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65014 td->mutexes[id] = 4;
65015 return 0;
65016
65017 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65018 return ret;
65019
65020 td->mutexes[id] = 1;
65021 - td->event = atomic_add_return(1, &rttest_event);
65022 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65023 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65024 - td->event = atomic_add_return(1, &rttest_event);
65025 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65026 td->mutexes[id] = ret ? 0 : 4;
65027 return ret ? -EINTR : 0;
65028
65029 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65030 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65031 return ret;
65032
65033 - td->event = atomic_add_return(1, &rttest_event);
65034 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65035 rt_mutex_unlock(&mutexes[id]);
65036 - td->event = atomic_add_return(1, &rttest_event);
65037 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65038 td->mutexes[id] = 0;
65039 return 0;
65040
65041 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65042 break;
65043
65044 td->mutexes[dat] = 2;
65045 - td->event = atomic_add_return(1, &rttest_event);
65046 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65047 break;
65048
65049 default:
65050 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65051 return;
65052
65053 td->mutexes[dat] = 3;
65054 - td->event = atomic_add_return(1, &rttest_event);
65055 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65056 break;
65057
65058 case RTTEST_LOCKNOWAIT:
65059 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65060 return;
65061
65062 td->mutexes[dat] = 1;
65063 - td->event = atomic_add_return(1, &rttest_event);
65064 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65065 return;
65066
65067 default:
65068 diff -urNp linux-3.0.7/kernel/sched_autogroup.c linux-3.0.7/kernel/sched_autogroup.c
65069 --- linux-3.0.7/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
65070 +++ linux-3.0.7/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
65071 @@ -7,7 +7,7 @@
65072
65073 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65074 static struct autogroup autogroup_default;
65075 -static atomic_t autogroup_seq_nr;
65076 +static atomic_unchecked_t autogroup_seq_nr;
65077
65078 static void __init autogroup_init(struct task_struct *init_task)
65079 {
65080 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65081
65082 kref_init(&ag->kref);
65083 init_rwsem(&ag->lock);
65084 - ag->id = atomic_inc_return(&autogroup_seq_nr);
65085 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65086 ag->tg = tg;
65087 #ifdef CONFIG_RT_GROUP_SCHED
65088 /*
65089 diff -urNp linux-3.0.7/kernel/sched.c linux-3.0.7/kernel/sched.c
65090 --- linux-3.0.7/kernel/sched.c 2011-10-17 23:17:09.000000000 -0400
65091 +++ linux-3.0.7/kernel/sched.c 2011-10-17 23:17:19.000000000 -0400
65092 @@ -4227,6 +4227,8 @@ static void __sched __schedule(void)
65093 struct rq *rq;
65094 int cpu;
65095
65096 + pax_track_stack();
65097 +
65098 need_resched:
65099 preempt_disable();
65100 cpu = smp_processor_id();
65101 @@ -4920,6 +4922,8 @@ int can_nice(const struct task_struct *p
65102 /* convert nice value [19,-20] to rlimit style value [1,40] */
65103 int nice_rlim = 20 - nice;
65104
65105 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65106 +
65107 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65108 capable(CAP_SYS_NICE));
65109 }
65110 @@ -4953,7 +4957,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65111 if (nice > 19)
65112 nice = 19;
65113
65114 - if (increment < 0 && !can_nice(current, nice))
65115 + if (increment < 0 && (!can_nice(current, nice) ||
65116 + gr_handle_chroot_nice()))
65117 return -EPERM;
65118
65119 retval = security_task_setnice(current, nice);
65120 @@ -5097,6 +5102,7 @@ recheck:
65121 unsigned long rlim_rtprio =
65122 task_rlimit(p, RLIMIT_RTPRIO);
65123
65124 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65125 /* can't set/change the rt policy */
65126 if (policy != p->policy && !rlim_rtprio)
65127 return -EPERM;
65128 diff -urNp linux-3.0.7/kernel/sched_fair.c linux-3.0.7/kernel/sched_fair.c
65129 --- linux-3.0.7/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
65130 +++ linux-3.0.7/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
65131 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
65132 * run_rebalance_domains is triggered when needed from the scheduler tick.
65133 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65134 */
65135 -static void run_rebalance_domains(struct softirq_action *h)
65136 +static void run_rebalance_domains(void)
65137 {
65138 int this_cpu = smp_processor_id();
65139 struct rq *this_rq = cpu_rq(this_cpu);
65140 diff -urNp linux-3.0.7/kernel/signal.c linux-3.0.7/kernel/signal.c
65141 --- linux-3.0.7/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
65142 +++ linux-3.0.7/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
65143 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65144
65145 int print_fatal_signals __read_mostly;
65146
65147 -static void __user *sig_handler(struct task_struct *t, int sig)
65148 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65149 {
65150 return t->sighand->action[sig - 1].sa.sa_handler;
65151 }
65152
65153 -static int sig_handler_ignored(void __user *handler, int sig)
65154 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65155 {
65156 /* Is it explicitly or implicitly ignored? */
65157 return handler == SIG_IGN ||
65158 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65159 static int sig_task_ignored(struct task_struct *t, int sig,
65160 int from_ancestor_ns)
65161 {
65162 - void __user *handler;
65163 + __sighandler_t handler;
65164
65165 handler = sig_handler(t, sig);
65166
65167 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
65168 atomic_inc(&user->sigpending);
65169 rcu_read_unlock();
65170
65171 + if (!override_rlimit)
65172 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65173 +
65174 if (override_rlimit ||
65175 atomic_read(&user->sigpending) <=
65176 task_rlimit(t, RLIMIT_SIGPENDING)) {
65177 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
65178
65179 int unhandled_signal(struct task_struct *tsk, int sig)
65180 {
65181 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65182 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65183 if (is_global_init(tsk))
65184 return 1;
65185 if (handler != SIG_IGN && handler != SIG_DFL)
65186 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
65187 }
65188 }
65189
65190 + /* allow glibc communication via tgkill to other threads in our
65191 + thread group */
65192 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65193 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65194 + && gr_handle_signal(t, sig))
65195 + return -EPERM;
65196 +
65197 return security_task_kill(t, info, sig, 0);
65198 }
65199
65200 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
65201 return send_signal(sig, info, p, 1);
65202 }
65203
65204 -static int
65205 +int
65206 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65207 {
65208 return send_signal(sig, info, t, 0);
65209 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
65210 unsigned long int flags;
65211 int ret, blocked, ignored;
65212 struct k_sigaction *action;
65213 + int is_unhandled = 0;
65214
65215 spin_lock_irqsave(&t->sighand->siglock, flags);
65216 action = &t->sighand->action[sig-1];
65217 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
65218 }
65219 if (action->sa.sa_handler == SIG_DFL)
65220 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65221 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65222 + is_unhandled = 1;
65223 ret = specific_send_sig_info(sig, info, t);
65224 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65225
65226 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
65227 + normal operation */
65228 + if (is_unhandled) {
65229 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65230 + gr_handle_crash(t, sig);
65231 + }
65232 +
65233 return ret;
65234 }
65235
65236 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
65237 ret = check_kill_permission(sig, info, p);
65238 rcu_read_unlock();
65239
65240 - if (!ret && sig)
65241 + if (!ret && sig) {
65242 ret = do_send_sig_info(sig, info, p, true);
65243 + if (!ret)
65244 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65245 + }
65246
65247 return ret;
65248 }
65249 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
65250 {
65251 siginfo_t info;
65252
65253 + pax_track_stack();
65254 +
65255 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65256
65257 memset(&info, 0, sizeof info);
65258 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65259 int error = -ESRCH;
65260
65261 rcu_read_lock();
65262 - p = find_task_by_vpid(pid);
65263 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65264 + /* allow glibc communication via tgkill to other threads in our
65265 + thread group */
65266 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65267 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65268 + p = find_task_by_vpid_unrestricted(pid);
65269 + else
65270 +#endif
65271 + p = find_task_by_vpid(pid);
65272 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65273 error = check_kill_permission(sig, info, p);
65274 /*
65275 diff -urNp linux-3.0.7/kernel/smp.c linux-3.0.7/kernel/smp.c
65276 --- linux-3.0.7/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
65277 +++ linux-3.0.7/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
65278 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65279 }
65280 EXPORT_SYMBOL(smp_call_function);
65281
65282 -void ipi_call_lock(void)
65283 +void ipi_call_lock(void) __acquires(call_function.lock)
65284 {
65285 raw_spin_lock(&call_function.lock);
65286 }
65287
65288 -void ipi_call_unlock(void)
65289 +void ipi_call_unlock(void) __releases(call_function.lock)
65290 {
65291 raw_spin_unlock(&call_function.lock);
65292 }
65293
65294 -void ipi_call_lock_irq(void)
65295 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65296 {
65297 raw_spin_lock_irq(&call_function.lock);
65298 }
65299
65300 -void ipi_call_unlock_irq(void)
65301 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65302 {
65303 raw_spin_unlock_irq(&call_function.lock);
65304 }
65305 diff -urNp linux-3.0.7/kernel/softirq.c linux-3.0.7/kernel/softirq.c
65306 --- linux-3.0.7/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
65307 +++ linux-3.0.7/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
65308 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65309
65310 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65311
65312 -char *softirq_to_name[NR_SOFTIRQS] = {
65313 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65314 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65315 "TASKLET", "SCHED", "HRTIMER", "RCU"
65316 };
65317 @@ -235,7 +235,7 @@ restart:
65318 kstat_incr_softirqs_this_cpu(vec_nr);
65319
65320 trace_softirq_entry(vec_nr);
65321 - h->action(h);
65322 + h->action();
65323 trace_softirq_exit(vec_nr);
65324 if (unlikely(prev_count != preempt_count())) {
65325 printk(KERN_ERR "huh, entered softirq %u %s %p"
65326 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65327 local_irq_restore(flags);
65328 }
65329
65330 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65331 +void open_softirq(int nr, void (*action)(void))
65332 {
65333 - softirq_vec[nr].action = action;
65334 + pax_open_kernel();
65335 + *(void **)&softirq_vec[nr].action = action;
65336 + pax_close_kernel();
65337 }
65338
65339 /*
65340 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65341
65342 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65343
65344 -static void tasklet_action(struct softirq_action *a)
65345 +static void tasklet_action(void)
65346 {
65347 struct tasklet_struct *list;
65348
65349 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65350 }
65351 }
65352
65353 -static void tasklet_hi_action(struct softirq_action *a)
65354 +static void tasklet_hi_action(void)
65355 {
65356 struct tasklet_struct *list;
65357
65358 diff -urNp linux-3.0.7/kernel/sys.c linux-3.0.7/kernel/sys.c
65359 --- linux-3.0.7/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
65360 +++ linux-3.0.7/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
65361 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
65362 error = -EACCES;
65363 goto out;
65364 }
65365 +
65366 + if (gr_handle_chroot_setpriority(p, niceval)) {
65367 + error = -EACCES;
65368 + goto out;
65369 + }
65370 +
65371 no_nice = security_task_setnice(p, niceval);
65372 if (no_nice) {
65373 error = no_nice;
65374 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65375 goto error;
65376 }
65377
65378 + if (gr_check_group_change(new->gid, new->egid, -1))
65379 + goto error;
65380 +
65381 if (rgid != (gid_t) -1 ||
65382 (egid != (gid_t) -1 && egid != old->gid))
65383 new->sgid = new->egid;
65384 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65385 old = current_cred();
65386
65387 retval = -EPERM;
65388 +
65389 + if (gr_check_group_change(gid, gid, gid))
65390 + goto error;
65391 +
65392 if (nsown_capable(CAP_SETGID))
65393 new->gid = new->egid = new->sgid = new->fsgid = gid;
65394 else if (gid == old->gid || gid == old->sgid)
65395 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
65396 if (!new_user)
65397 return -EAGAIN;
65398
65399 + /*
65400 + * We don't fail in case of NPROC limit excess here because too many
65401 + * poorly written programs don't check set*uid() return code, assuming
65402 + * it never fails if called by root. We may still enforce NPROC limit
65403 + * for programs doing set*uid()+execve() by harmlessly deferring the
65404 + * failure to the execve() stage.
65405 + */
65406 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
65407 - new_user != INIT_USER) {
65408 - free_uid(new_user);
65409 - return -EAGAIN;
65410 - }
65411 + new_user != INIT_USER)
65412 + current->flags |= PF_NPROC_EXCEEDED;
65413 + else
65414 + current->flags &= ~PF_NPROC_EXCEEDED;
65415
65416 free_uid(new->user);
65417 new->user = new_user;
65418 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65419 goto error;
65420 }
65421
65422 + if (gr_check_user_change(new->uid, new->euid, -1))
65423 + goto error;
65424 +
65425 if (new->uid != old->uid) {
65426 retval = set_user(new);
65427 if (retval < 0)
65428 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65429 old = current_cred();
65430
65431 retval = -EPERM;
65432 +
65433 + if (gr_check_crash_uid(uid))
65434 + goto error;
65435 + if (gr_check_user_change(uid, uid, uid))
65436 + goto error;
65437 +
65438 if (nsown_capable(CAP_SETUID)) {
65439 new->suid = new->uid = uid;
65440 if (uid != old->uid) {
65441 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65442 goto error;
65443 }
65444
65445 + if (gr_check_user_change(ruid, euid, -1))
65446 + goto error;
65447 +
65448 if (ruid != (uid_t) -1) {
65449 new->uid = ruid;
65450 if (ruid != old->uid) {
65451 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65452 goto error;
65453 }
65454
65455 + if (gr_check_group_change(rgid, egid, -1))
65456 + goto error;
65457 +
65458 if (rgid != (gid_t) -1)
65459 new->gid = rgid;
65460 if (egid != (gid_t) -1)
65461 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65462 old = current_cred();
65463 old_fsuid = old->fsuid;
65464
65465 + if (gr_check_user_change(-1, -1, uid))
65466 + goto error;
65467 +
65468 if (uid == old->uid || uid == old->euid ||
65469 uid == old->suid || uid == old->fsuid ||
65470 nsown_capable(CAP_SETUID)) {
65471 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65472 }
65473 }
65474
65475 +error:
65476 abort_creds(new);
65477 return old_fsuid;
65478
65479 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65480 if (gid == old->gid || gid == old->egid ||
65481 gid == old->sgid || gid == old->fsgid ||
65482 nsown_capable(CAP_SETGID)) {
65483 + if (gr_check_group_change(-1, -1, gid))
65484 + goto error;
65485 +
65486 if (gid != old_fsgid) {
65487 new->fsgid = gid;
65488 goto change_okay;
65489 }
65490 }
65491
65492 +error:
65493 abort_creds(new);
65494 return old_fsgid;
65495
65496 @@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65497 return -EFAULT;
65498
65499 down_read(&uts_sem);
65500 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
65501 + error = __copy_to_user(name->sysname, &utsname()->sysname,
65502 __OLD_UTS_LEN);
65503 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65504 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65505 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
65506 __OLD_UTS_LEN);
65507 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65508 - error |= __copy_to_user(&name->release, &utsname()->release,
65509 + error |= __copy_to_user(name->release, &utsname()->release,
65510 __OLD_UTS_LEN);
65511 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65512 - error |= __copy_to_user(&name->version, &utsname()->version,
65513 + error |= __copy_to_user(name->version, &utsname()->version,
65514 __OLD_UTS_LEN);
65515 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65516 - error |= __copy_to_user(&name->machine, &utsname()->machine,
65517 + error |= __copy_to_user(name->machine, &utsname()->machine,
65518 __OLD_UTS_LEN);
65519 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65520 up_read(&uts_sem);
65521 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65522 error = get_dumpable(me->mm);
65523 break;
65524 case PR_SET_DUMPABLE:
65525 - if (arg2 < 0 || arg2 > 1) {
65526 + if (arg2 > 1) {
65527 error = -EINVAL;
65528 break;
65529 }
65530 diff -urNp linux-3.0.7/kernel/sysctl_binary.c linux-3.0.7/kernel/sysctl_binary.c
65531 --- linux-3.0.7/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
65532 +++ linux-3.0.7/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
65533 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65534 int i;
65535
65536 set_fs(KERNEL_DS);
65537 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65538 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65539 set_fs(old_fs);
65540 if (result < 0)
65541 goto out_kfree;
65542 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65543 }
65544
65545 set_fs(KERNEL_DS);
65546 - result = vfs_write(file, buffer, str - buffer, &pos);
65547 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65548 set_fs(old_fs);
65549 if (result < 0)
65550 goto out_kfree;
65551 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65552 int i;
65553
65554 set_fs(KERNEL_DS);
65555 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65556 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65557 set_fs(old_fs);
65558 if (result < 0)
65559 goto out_kfree;
65560 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65561 }
65562
65563 set_fs(KERNEL_DS);
65564 - result = vfs_write(file, buffer, str - buffer, &pos);
65565 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65566 set_fs(old_fs);
65567 if (result < 0)
65568 goto out_kfree;
65569 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
65570 int i;
65571
65572 set_fs(KERNEL_DS);
65573 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65574 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65575 set_fs(old_fs);
65576 if (result < 0)
65577 goto out;
65578 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
65579 __le16 dnaddr;
65580
65581 set_fs(KERNEL_DS);
65582 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65583 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65584 set_fs(old_fs);
65585 if (result < 0)
65586 goto out;
65587 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
65588 le16_to_cpu(dnaddr) & 0x3ff);
65589
65590 set_fs(KERNEL_DS);
65591 - result = vfs_write(file, buf, len, &pos);
65592 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
65593 set_fs(old_fs);
65594 if (result < 0)
65595 goto out;
65596 diff -urNp linux-3.0.7/kernel/sysctl.c linux-3.0.7/kernel/sysctl.c
65597 --- linux-3.0.7/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
65598 +++ linux-3.0.7/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
65599 @@ -85,6 +85,13 @@
65600
65601
65602 #if defined(CONFIG_SYSCTL)
65603 +#include <linux/grsecurity.h>
65604 +#include <linux/grinternal.h>
65605 +
65606 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65607 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65608 + const int op);
65609 +extern int gr_handle_chroot_sysctl(const int op);
65610
65611 /* External variables not in a header file. */
65612 extern int sysctl_overcommit_memory;
65613 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
65614 }
65615
65616 #endif
65617 +extern struct ctl_table grsecurity_table[];
65618
65619 static struct ctl_table root_table[];
65620 static struct ctl_table_root sysctl_table_root;
65621 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
65622 int sysctl_legacy_va_layout;
65623 #endif
65624
65625 +#ifdef CONFIG_PAX_SOFTMODE
65626 +static ctl_table pax_table[] = {
65627 + {
65628 + .procname = "softmode",
65629 + .data = &pax_softmode,
65630 + .maxlen = sizeof(unsigned int),
65631 + .mode = 0600,
65632 + .proc_handler = &proc_dointvec,
65633 + },
65634 +
65635 + { }
65636 +};
65637 +#endif
65638 +
65639 /* The default sysctl tables: */
65640
65641 static struct ctl_table root_table[] = {
65642 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
65643 #endif
65644
65645 static struct ctl_table kern_table[] = {
65646 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65647 + {
65648 + .procname = "grsecurity",
65649 + .mode = 0500,
65650 + .child = grsecurity_table,
65651 + },
65652 +#endif
65653 +
65654 +#ifdef CONFIG_PAX_SOFTMODE
65655 + {
65656 + .procname = "pax",
65657 + .mode = 0500,
65658 + .child = pax_table,
65659 + },
65660 +#endif
65661 +
65662 {
65663 .procname = "sched_child_runs_first",
65664 .data = &sysctl_sched_child_runs_first,
65665 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
65666 .data = &modprobe_path,
65667 .maxlen = KMOD_PATH_LEN,
65668 .mode = 0644,
65669 - .proc_handler = proc_dostring,
65670 + .proc_handler = proc_dostring_modpriv,
65671 },
65672 {
65673 .procname = "modules_disabled",
65674 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
65675 .extra1 = &zero,
65676 .extra2 = &one,
65677 },
65678 +#endif
65679 {
65680 .procname = "kptr_restrict",
65681 .data = &kptr_restrict,
65682 .maxlen = sizeof(int),
65683 .mode = 0644,
65684 .proc_handler = proc_dmesg_restrict,
65685 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65686 + .extra1 = &two,
65687 +#else
65688 .extra1 = &zero,
65689 +#endif
65690 .extra2 = &two,
65691 },
65692 -#endif
65693 {
65694 .procname = "ngroups_max",
65695 .data = &ngroups_max,
65696 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
65697 .proc_handler = proc_dointvec_minmax,
65698 .extra1 = &zero,
65699 },
65700 + {
65701 + .procname = "heap_stack_gap",
65702 + .data = &sysctl_heap_stack_gap,
65703 + .maxlen = sizeof(sysctl_heap_stack_gap),
65704 + .mode = 0644,
65705 + .proc_handler = proc_doulongvec_minmax,
65706 + },
65707 #else
65708 {
65709 .procname = "nr_trim_pages",
65710 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
65711 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
65712 {
65713 int mode;
65714 + int error;
65715 +
65716 + if (table->parent != NULL && table->parent->procname != NULL &&
65717 + table->procname != NULL &&
65718 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65719 + return -EACCES;
65720 + if (gr_handle_chroot_sysctl(op))
65721 + return -EACCES;
65722 + error = gr_handle_sysctl(table, op);
65723 + if (error)
65724 + return error;
65725
65726 if (root->permissions)
65727 mode = root->permissions(root, current->nsproxy, table);
65728 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
65729 buffer, lenp, ppos);
65730 }
65731
65732 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65733 + void __user *buffer, size_t *lenp, loff_t *ppos)
65734 +{
65735 + if (write && !capable(CAP_SYS_MODULE))
65736 + return -EPERM;
65737 +
65738 + return _proc_do_string(table->data, table->maxlen, write,
65739 + buffer, lenp, ppos);
65740 +}
65741 +
65742 static size_t proc_skip_spaces(char **buf)
65743 {
65744 size_t ret;
65745 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
65746 len = strlen(tmp);
65747 if (len > *size)
65748 len = *size;
65749 + if (len > sizeof(tmp))
65750 + len = sizeof(tmp);
65751 if (copy_to_user(*buf, tmp, len))
65752 return -EFAULT;
65753 *size -= len;
65754 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
65755 *i = val;
65756 } else {
65757 val = convdiv * (*i) / convmul;
65758 - if (!first)
65759 + if (!first) {
65760 err = proc_put_char(&buffer, &left, '\t');
65761 + if (err)
65762 + break;
65763 + }
65764 err = proc_put_long(&buffer, &left, val, false);
65765 if (err)
65766 break;
65767 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
65768 return -ENOSYS;
65769 }
65770
65771 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65772 + void __user *buffer, size_t *lenp, loff_t *ppos)
65773 +{
65774 + return -ENOSYS;
65775 +}
65776 +
65777 int proc_dointvec(struct ctl_table *table, int write,
65778 void __user *buffer, size_t *lenp, loff_t *ppos)
65779 {
65780 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65781 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65782 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65783 EXPORT_SYMBOL(proc_dostring);
65784 +EXPORT_SYMBOL(proc_dostring_modpriv);
65785 EXPORT_SYMBOL(proc_doulongvec_minmax);
65786 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65787 EXPORT_SYMBOL(register_sysctl_table);
65788 diff -urNp linux-3.0.7/kernel/sysctl_check.c linux-3.0.7/kernel/sysctl_check.c
65789 --- linux-3.0.7/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
65790 +++ linux-3.0.7/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
65791 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
65792 set_fail(&fail, table, "Directory with extra2");
65793 } else {
65794 if ((table->proc_handler == proc_dostring) ||
65795 + (table->proc_handler == proc_dostring_modpriv) ||
65796 (table->proc_handler == proc_dointvec) ||
65797 (table->proc_handler == proc_dointvec_minmax) ||
65798 (table->proc_handler == proc_dointvec_jiffies) ||
65799 diff -urNp linux-3.0.7/kernel/taskstats.c linux-3.0.7/kernel/taskstats.c
65800 --- linux-3.0.7/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
65801 +++ linux-3.0.7/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
65802 @@ -27,9 +27,12 @@
65803 #include <linux/cgroup.h>
65804 #include <linux/fs.h>
65805 #include <linux/file.h>
65806 +#include <linux/grsecurity.h>
65807 #include <net/genetlink.h>
65808 #include <asm/atomic.h>
65809
65810 +extern int gr_is_taskstats_denied(int pid);
65811 +
65812 /*
65813 * Maximum length of a cpumask that can be specified in
65814 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65815 @@ -558,6 +561,9 @@ err:
65816
65817 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
65818 {
65819 + if (gr_is_taskstats_denied(current->pid))
65820 + return -EACCES;
65821 +
65822 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
65823 return cmd_attr_register_cpumask(info);
65824 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
65825 diff -urNp linux-3.0.7/kernel/time/alarmtimer.c linux-3.0.7/kernel/time/alarmtimer.c
65826 --- linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:54:54.000000000 -0400
65827 +++ linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:55:28.000000000 -0400
65828 @@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
65829 {
65830 int error = 0;
65831 int i;
65832 - struct k_clock alarm_clock = {
65833 + static struct k_clock alarm_clock = {
65834 .clock_getres = alarm_clock_getres,
65835 .clock_get = alarm_clock_get,
65836 .timer_create = alarm_timer_create,
65837 diff -urNp linux-3.0.7/kernel/time/tick-broadcast.c linux-3.0.7/kernel/time/tick-broadcast.c
65838 --- linux-3.0.7/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
65839 +++ linux-3.0.7/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
65840 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
65841 * then clear the broadcast bit.
65842 */
65843 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65844 - int cpu = smp_processor_id();
65845 + cpu = smp_processor_id();
65846
65847 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
65848 tick_broadcast_clear_oneshot(cpu);
65849 diff -urNp linux-3.0.7/kernel/time/timekeeping.c linux-3.0.7/kernel/time/timekeeping.c
65850 --- linux-3.0.7/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
65851 +++ linux-3.0.7/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
65852 @@ -14,6 +14,7 @@
65853 #include <linux/init.h>
65854 #include <linux/mm.h>
65855 #include <linux/sched.h>
65856 +#include <linux/grsecurity.h>
65857 #include <linux/syscore_ops.h>
65858 #include <linux/clocksource.h>
65859 #include <linux/jiffies.h>
65860 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
65861 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
65862 return -EINVAL;
65863
65864 + gr_log_timechange();
65865 +
65866 write_seqlock_irqsave(&xtime_lock, flags);
65867
65868 timekeeping_forward_now();
65869 diff -urNp linux-3.0.7/kernel/time/timer_list.c linux-3.0.7/kernel/time/timer_list.c
65870 --- linux-3.0.7/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
65871 +++ linux-3.0.7/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
65872 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
65873
65874 static void print_name_offset(struct seq_file *m, void *sym)
65875 {
65876 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65877 + SEQ_printf(m, "<%p>", NULL);
65878 +#else
65879 char symname[KSYM_NAME_LEN];
65880
65881 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
65882 SEQ_printf(m, "<%pK>", sym);
65883 else
65884 SEQ_printf(m, "%s", symname);
65885 +#endif
65886 }
65887
65888 static void
65889 @@ -112,7 +116,11 @@ next_one:
65890 static void
65891 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
65892 {
65893 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65894 + SEQ_printf(m, " .base: %p\n", NULL);
65895 +#else
65896 SEQ_printf(m, " .base: %pK\n", base);
65897 +#endif
65898 SEQ_printf(m, " .index: %d\n",
65899 base->index);
65900 SEQ_printf(m, " .resolution: %Lu nsecs\n",
65901 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
65902 {
65903 struct proc_dir_entry *pe;
65904
65905 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65906 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
65907 +#else
65908 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
65909 +#endif
65910 if (!pe)
65911 return -ENOMEM;
65912 return 0;
65913 diff -urNp linux-3.0.7/kernel/time/timer_stats.c linux-3.0.7/kernel/time/timer_stats.c
65914 --- linux-3.0.7/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
65915 +++ linux-3.0.7/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
65916 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
65917 static unsigned long nr_entries;
65918 static struct entry entries[MAX_ENTRIES];
65919
65920 -static atomic_t overflow_count;
65921 +static atomic_unchecked_t overflow_count;
65922
65923 /*
65924 * The entries are in a hash-table, for fast lookup:
65925 @@ -140,7 +140,7 @@ static void reset_entries(void)
65926 nr_entries = 0;
65927 memset(entries, 0, sizeof(entries));
65928 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
65929 - atomic_set(&overflow_count, 0);
65930 + atomic_set_unchecked(&overflow_count, 0);
65931 }
65932
65933 static struct entry *alloc_entry(void)
65934 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
65935 if (likely(entry))
65936 entry->count++;
65937 else
65938 - atomic_inc(&overflow_count);
65939 + atomic_inc_unchecked(&overflow_count);
65940
65941 out_unlock:
65942 raw_spin_unlock_irqrestore(lock, flags);
65943 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
65944
65945 static void print_name_offset(struct seq_file *m, unsigned long addr)
65946 {
65947 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65948 + seq_printf(m, "<%p>", NULL);
65949 +#else
65950 char symname[KSYM_NAME_LEN];
65951
65952 if (lookup_symbol_name(addr, symname) < 0)
65953 seq_printf(m, "<%p>", (void *)addr);
65954 else
65955 seq_printf(m, "%s", symname);
65956 +#endif
65957 }
65958
65959 static int tstats_show(struct seq_file *m, void *v)
65960 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
65961
65962 seq_puts(m, "Timer Stats Version: v0.2\n");
65963 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
65964 - if (atomic_read(&overflow_count))
65965 + if (atomic_read_unchecked(&overflow_count))
65966 seq_printf(m, "Overflow: %d entries\n",
65967 - atomic_read(&overflow_count));
65968 + atomic_read_unchecked(&overflow_count));
65969
65970 for (i = 0; i < nr_entries; i++) {
65971 entry = entries + i;
65972 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
65973 {
65974 struct proc_dir_entry *pe;
65975
65976 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65977 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
65978 +#else
65979 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
65980 +#endif
65981 if (!pe)
65982 return -ENOMEM;
65983 return 0;
65984 diff -urNp linux-3.0.7/kernel/time.c linux-3.0.7/kernel/time.c
65985 --- linux-3.0.7/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
65986 +++ linux-3.0.7/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
65987 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
65988 return error;
65989
65990 if (tz) {
65991 + /* we log in do_settimeofday called below, so don't log twice
65992 + */
65993 + if (!tv)
65994 + gr_log_timechange();
65995 +
65996 /* SMP safe, global irq locking makes it work. */
65997 sys_tz = *tz;
65998 update_vsyscall_tz();
65999 diff -urNp linux-3.0.7/kernel/timer.c linux-3.0.7/kernel/timer.c
66000 --- linux-3.0.7/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
66001 +++ linux-3.0.7/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
66002 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66003 /*
66004 * This function runs timers and the timer-tq in bottom half context.
66005 */
66006 -static void run_timer_softirq(struct softirq_action *h)
66007 +static void run_timer_softirq(void)
66008 {
66009 struct tvec_base *base = __this_cpu_read(tvec_bases);
66010
66011 diff -urNp linux-3.0.7/kernel/trace/blktrace.c linux-3.0.7/kernel/trace/blktrace.c
66012 --- linux-3.0.7/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
66013 +++ linux-3.0.7/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
66014 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
66015 struct blk_trace *bt = filp->private_data;
66016 char buf[16];
66017
66018 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66019 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66020
66021 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66022 }
66023 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
66024 return 1;
66025
66026 bt = buf->chan->private_data;
66027 - atomic_inc(&bt->dropped);
66028 + atomic_inc_unchecked(&bt->dropped);
66029 return 0;
66030 }
66031
66032 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
66033
66034 bt->dir = dir;
66035 bt->dev = dev;
66036 - atomic_set(&bt->dropped, 0);
66037 + atomic_set_unchecked(&bt->dropped, 0);
66038
66039 ret = -EIO;
66040 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66041 diff -urNp linux-3.0.7/kernel/trace/ftrace.c linux-3.0.7/kernel/trace/ftrace.c
66042 --- linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:09.000000000 -0400
66043 +++ linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:19.000000000 -0400
66044 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
66045 if (unlikely(ftrace_disabled))
66046 return 0;
66047
66048 + ret = ftrace_arch_code_modify_prepare();
66049 + FTRACE_WARN_ON(ret);
66050 + if (ret)
66051 + return 0;
66052 +
66053 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66054 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66055 if (ret) {
66056 ftrace_bug(ret, ip);
66057 - return 0;
66058 }
66059 - return 1;
66060 + return ret ? 0 : 1;
66061 }
66062
66063 /*
66064 @@ -2570,7 +2575,7 @@ static void ftrace_free_entry_rcu(struct
66065
66066 int
66067 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66068 - void *data)
66069 + void *data)
66070 {
66071 struct ftrace_func_probe *entry;
66072 struct ftrace_page *pg;
66073 diff -urNp linux-3.0.7/kernel/trace/trace.c linux-3.0.7/kernel/trace/trace.c
66074 --- linux-3.0.7/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
66075 +++ linux-3.0.7/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
66076 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
66077 size_t rem;
66078 unsigned int i;
66079
66080 + pax_track_stack();
66081 +
66082 if (splice_grow_spd(pipe, &spd))
66083 return -ENOMEM;
66084
66085 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
66086 int entries, size, i;
66087 size_t ret;
66088
66089 + pax_track_stack();
66090 +
66091 if (splice_grow_spd(pipe, &spd))
66092 return -ENOMEM;
66093
66094 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
66095 };
66096 #endif
66097
66098 -static struct dentry *d_tracer;
66099 -
66100 struct dentry *tracing_init_dentry(void)
66101 {
66102 + static struct dentry *d_tracer;
66103 static int once;
66104
66105 if (d_tracer)
66106 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
66107 return d_tracer;
66108 }
66109
66110 -static struct dentry *d_percpu;
66111 -
66112 struct dentry *tracing_dentry_percpu(void)
66113 {
66114 + static struct dentry *d_percpu;
66115 static int once;
66116 struct dentry *d_tracer;
66117
66118 diff -urNp linux-3.0.7/kernel/trace/trace_events.c linux-3.0.7/kernel/trace/trace_events.c
66119 --- linux-3.0.7/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
66120 +++ linux-3.0.7/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
66121 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
66122 struct ftrace_module_file_ops {
66123 struct list_head list;
66124 struct module *mod;
66125 - struct file_operations id;
66126 - struct file_operations enable;
66127 - struct file_operations format;
66128 - struct file_operations filter;
66129 };
66130
66131 static struct ftrace_module_file_ops *
66132 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
66133
66134 file_ops->mod = mod;
66135
66136 - file_ops->id = ftrace_event_id_fops;
66137 - file_ops->id.owner = mod;
66138 -
66139 - file_ops->enable = ftrace_enable_fops;
66140 - file_ops->enable.owner = mod;
66141 -
66142 - file_ops->filter = ftrace_event_filter_fops;
66143 - file_ops->filter.owner = mod;
66144 -
66145 - file_ops->format = ftrace_event_format_fops;
66146 - file_ops->format.owner = mod;
66147 + pax_open_kernel();
66148 + *(void **)&mod->trace_id.owner = mod;
66149 + *(void **)&mod->trace_enable.owner = mod;
66150 + *(void **)&mod->trace_filter.owner = mod;
66151 + *(void **)&mod->trace_format.owner = mod;
66152 + pax_close_kernel();
66153
66154 list_add(&file_ops->list, &ftrace_module_file_list);
66155
66156 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
66157
66158 for_each_event(call, start, end) {
66159 __trace_add_event_call(*call, mod,
66160 - &file_ops->id, &file_ops->enable,
66161 - &file_ops->filter, &file_ops->format);
66162 + &mod->trace_id, &mod->trace_enable,
66163 + &mod->trace_filter, &mod->trace_format);
66164 }
66165 }
66166
66167 diff -urNp linux-3.0.7/kernel/trace/trace_kprobe.c linux-3.0.7/kernel/trace/trace_kprobe.c
66168 --- linux-3.0.7/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
66169 +++ linux-3.0.7/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
66170 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66171 long ret;
66172 int maxlen = get_rloc_len(*(u32 *)dest);
66173 u8 *dst = get_rloc_data(dest);
66174 - u8 *src = addr;
66175 + const u8 __user *src = (const u8 __force_user *)addr;
66176 mm_segment_t old_fs = get_fs();
66177 if (!maxlen)
66178 return;
66179 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66180 pagefault_disable();
66181 do
66182 ret = __copy_from_user_inatomic(dst++, src++, 1);
66183 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66184 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66185 dst[-1] = '\0';
66186 pagefault_enable();
66187 set_fs(old_fs);
66188 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66189 ((u8 *)get_rloc_data(dest))[0] = '\0';
66190 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66191 } else
66192 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66193 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66194 get_rloc_offs(*(u32 *)dest));
66195 }
66196 /* Return the length of string -- including null terminal byte */
66197 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66198 set_fs(KERNEL_DS);
66199 pagefault_disable();
66200 do {
66201 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66202 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66203 len++;
66204 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66205 pagefault_enable();
66206 diff -urNp linux-3.0.7/kernel/trace/trace_mmiotrace.c linux-3.0.7/kernel/trace/trace_mmiotrace.c
66207 --- linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
66208 +++ linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
66209 @@ -24,7 +24,7 @@ struct header_iter {
66210 static struct trace_array *mmio_trace_array;
66211 static bool overrun_detected;
66212 static unsigned long prev_overruns;
66213 -static atomic_t dropped_count;
66214 +static atomic_unchecked_t dropped_count;
66215
66216 static void mmio_reset_data(struct trace_array *tr)
66217 {
66218 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66219
66220 static unsigned long count_overruns(struct trace_iterator *iter)
66221 {
66222 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66223 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66224 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66225
66226 if (over > prev_overruns)
66227 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66228 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66229 sizeof(*entry), 0, pc);
66230 if (!event) {
66231 - atomic_inc(&dropped_count);
66232 + atomic_inc_unchecked(&dropped_count);
66233 return;
66234 }
66235 entry = ring_buffer_event_data(event);
66236 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66237 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66238 sizeof(*entry), 0, pc);
66239 if (!event) {
66240 - atomic_inc(&dropped_count);
66241 + atomic_inc_unchecked(&dropped_count);
66242 return;
66243 }
66244 entry = ring_buffer_event_data(event);
66245 diff -urNp linux-3.0.7/kernel/trace/trace_output.c linux-3.0.7/kernel/trace/trace_output.c
66246 --- linux-3.0.7/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
66247 +++ linux-3.0.7/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
66248 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66249
66250 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66251 if (!IS_ERR(p)) {
66252 - p = mangle_path(s->buffer + s->len, p, "\n");
66253 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66254 if (p) {
66255 s->len = p - s->buffer;
66256 return 1;
66257 diff -urNp linux-3.0.7/kernel/trace/trace_stack.c linux-3.0.7/kernel/trace/trace_stack.c
66258 --- linux-3.0.7/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
66259 +++ linux-3.0.7/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
66260 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66261 return;
66262
66263 /* we do not handle interrupt stacks yet */
66264 - if (!object_is_on_stack(&this_size))
66265 + if (!object_starts_on_stack(&this_size))
66266 return;
66267
66268 local_irq_save(flags);
66269 diff -urNp linux-3.0.7/kernel/trace/trace_workqueue.c linux-3.0.7/kernel/trace/trace_workqueue.c
66270 --- linux-3.0.7/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
66271 +++ linux-3.0.7/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
66272 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66273 int cpu;
66274 pid_t pid;
66275 /* Can be inserted from interrupt or user context, need to be atomic */
66276 - atomic_t inserted;
66277 + atomic_unchecked_t inserted;
66278 /*
66279 * Don't need to be atomic, works are serialized in a single workqueue thread
66280 * on a single CPU.
66281 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66282 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66283 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66284 if (node->pid == wq_thread->pid) {
66285 - atomic_inc(&node->inserted);
66286 + atomic_inc_unchecked(&node->inserted);
66287 goto found;
66288 }
66289 }
66290 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66291 tsk = get_pid_task(pid, PIDTYPE_PID);
66292 if (tsk) {
66293 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66294 - atomic_read(&cws->inserted), cws->executed,
66295 + atomic_read_unchecked(&cws->inserted), cws->executed,
66296 tsk->comm);
66297 put_task_struct(tsk);
66298 }
66299 diff -urNp linux-3.0.7/lib/bitmap.c linux-3.0.7/lib/bitmap.c
66300 --- linux-3.0.7/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
66301 +++ linux-3.0.7/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
66302 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
66303 {
66304 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66305 u32 chunk;
66306 - const char __user *ubuf = buf;
66307 + const char __user *ubuf = (const char __force_user *)buf;
66308
66309 bitmap_zero(maskp, nmaskbits);
66310
66311 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
66312 {
66313 if (!access_ok(VERIFY_READ, ubuf, ulen))
66314 return -EFAULT;
66315 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66316 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66317 }
66318 EXPORT_SYMBOL(bitmap_parse_user);
66319
66320 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
66321 {
66322 unsigned a, b;
66323 int c, old_c, totaldigits;
66324 - const char __user *ubuf = buf;
66325 + const char __user *ubuf = (const char __force_user *)buf;
66326 int exp_digit, in_range;
66327
66328 totaldigits = c = 0;
66329 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
66330 {
66331 if (!access_ok(VERIFY_READ, ubuf, ulen))
66332 return -EFAULT;
66333 - return __bitmap_parselist((const char *)ubuf,
66334 + return __bitmap_parselist((const char __force_kernel *)ubuf,
66335 ulen, 1, maskp, nmaskbits);
66336 }
66337 EXPORT_SYMBOL(bitmap_parselist_user);
66338 diff -urNp linux-3.0.7/lib/bug.c linux-3.0.7/lib/bug.c
66339 --- linux-3.0.7/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
66340 +++ linux-3.0.7/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
66341 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66342 return BUG_TRAP_TYPE_NONE;
66343
66344 bug = find_bug(bugaddr);
66345 + if (!bug)
66346 + return BUG_TRAP_TYPE_NONE;
66347
66348 file = NULL;
66349 line = 0;
66350 diff -urNp linux-3.0.7/lib/debugobjects.c linux-3.0.7/lib/debugobjects.c
66351 --- linux-3.0.7/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
66352 +++ linux-3.0.7/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
66353 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66354 if (limit > 4)
66355 return;
66356
66357 - is_on_stack = object_is_on_stack(addr);
66358 + is_on_stack = object_starts_on_stack(addr);
66359 if (is_on_stack == onstack)
66360 return;
66361
66362 diff -urNp linux-3.0.7/lib/devres.c linux-3.0.7/lib/devres.c
66363 --- linux-3.0.7/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
66364 +++ linux-3.0.7/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
66365 @@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
66366 {
66367 iounmap(addr);
66368 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66369 - (void *)addr));
66370 + (void __force *)addr));
66371 }
66372 EXPORT_SYMBOL(devm_iounmap);
66373
66374 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66375 {
66376 ioport_unmap(addr);
66377 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66378 - devm_ioport_map_match, (void *)addr));
66379 + devm_ioport_map_match, (void __force *)addr));
66380 }
66381 EXPORT_SYMBOL(devm_ioport_unmap);
66382
66383 diff -urNp linux-3.0.7/lib/dma-debug.c linux-3.0.7/lib/dma-debug.c
66384 --- linux-3.0.7/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
66385 +++ linux-3.0.7/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
66386 @@ -870,7 +870,7 @@ out:
66387
66388 static void check_for_stack(struct device *dev, void *addr)
66389 {
66390 - if (object_is_on_stack(addr))
66391 + if (object_starts_on_stack(addr))
66392 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66393 "stack [addr=%p]\n", addr);
66394 }
66395 diff -urNp linux-3.0.7/lib/extable.c linux-3.0.7/lib/extable.c
66396 --- linux-3.0.7/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
66397 +++ linux-3.0.7/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
66398 @@ -13,6 +13,7 @@
66399 #include <linux/init.h>
66400 #include <linux/sort.h>
66401 #include <asm/uaccess.h>
66402 +#include <asm/pgtable.h>
66403
66404 #ifndef ARCH_HAS_SORT_EXTABLE
66405 /*
66406 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66407 void sort_extable(struct exception_table_entry *start,
66408 struct exception_table_entry *finish)
66409 {
66410 + pax_open_kernel();
66411 sort(start, finish - start, sizeof(struct exception_table_entry),
66412 cmp_ex, NULL);
66413 + pax_close_kernel();
66414 }
66415
66416 #ifdef CONFIG_MODULES
66417 diff -urNp linux-3.0.7/lib/inflate.c linux-3.0.7/lib/inflate.c
66418 --- linux-3.0.7/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
66419 +++ linux-3.0.7/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
66420 @@ -269,7 +269,7 @@ static void free(void *where)
66421 malloc_ptr = free_mem_ptr;
66422 }
66423 #else
66424 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66425 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66426 #define free(a) kfree(a)
66427 #endif
66428
66429 diff -urNp linux-3.0.7/lib/Kconfig.debug linux-3.0.7/lib/Kconfig.debug
66430 --- linux-3.0.7/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
66431 +++ linux-3.0.7/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
66432 @@ -1088,6 +1088,7 @@ config LATENCYTOP
66433 depends on DEBUG_KERNEL
66434 depends on STACKTRACE_SUPPORT
66435 depends on PROC_FS
66436 + depends on !GRKERNSEC_HIDESYM
66437 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66438 select KALLSYMS
66439 select KALLSYMS_ALL
66440 diff -urNp linux-3.0.7/lib/kref.c linux-3.0.7/lib/kref.c
66441 --- linux-3.0.7/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
66442 +++ linux-3.0.7/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
66443 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66444 */
66445 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66446 {
66447 - WARN_ON(release == NULL);
66448 + BUG_ON(release == NULL);
66449 WARN_ON(release == (void (*)(struct kref *))kfree);
66450
66451 if (atomic_dec_and_test(&kref->refcount)) {
66452 diff -urNp linux-3.0.7/lib/radix-tree.c linux-3.0.7/lib/radix-tree.c
66453 --- linux-3.0.7/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
66454 +++ linux-3.0.7/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
66455 @@ -80,7 +80,7 @@ struct radix_tree_preload {
66456 int nr;
66457 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66458 };
66459 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66460 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66461
66462 static inline void *ptr_to_indirect(void *ptr)
66463 {
66464 diff -urNp linux-3.0.7/lib/vsprintf.c linux-3.0.7/lib/vsprintf.c
66465 --- linux-3.0.7/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
66466 +++ linux-3.0.7/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
66467 @@ -16,6 +16,9 @@
66468 * - scnprintf and vscnprintf
66469 */
66470
66471 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66472 +#define __INCLUDED_BY_HIDESYM 1
66473 +#endif
66474 #include <stdarg.h>
66475 #include <linux/module.h>
66476 #include <linux/types.h>
66477 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
66478 char sym[KSYM_SYMBOL_LEN];
66479 if (ext == 'B')
66480 sprint_backtrace(sym, value);
66481 - else if (ext != 'f' && ext != 's')
66482 + else if (ext != 'f' && ext != 's' && ext != 'a')
66483 sprint_symbol(sym, value);
66484 else
66485 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66486 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
66487 return string(buf, end, uuid, spec);
66488 }
66489
66490 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66491 +int kptr_restrict __read_mostly = 2;
66492 +#else
66493 int kptr_restrict __read_mostly;
66494 +#endif
66495
66496 /*
66497 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66498 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
66499 * - 'S' For symbolic direct pointers with offset
66500 * - 's' For symbolic direct pointers without offset
66501 * - 'B' For backtraced symbolic direct pointers with offset
66502 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66503 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66504 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66505 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66506 * - 'M' For a 6-byte MAC address, it prints the address in the
66507 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
66508 {
66509 if (!ptr && *fmt != 'K') {
66510 /*
66511 - * Print (null) with the same width as a pointer so it makes
66512 + * Print (nil) with the same width as a pointer so it makes
66513 * tabular output look nice.
66514 */
66515 if (spec.field_width == -1)
66516 spec.field_width = 2 * sizeof(void *);
66517 - return string(buf, end, "(null)", spec);
66518 + return string(buf, end, "(nil)", spec);
66519 }
66520
66521 switch (*fmt) {
66522 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
66523 /* Fallthrough */
66524 case 'S':
66525 case 's':
66526 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66527 + break;
66528 +#else
66529 + return symbol_string(buf, end, ptr, spec, *fmt);
66530 +#endif
66531 + case 'A':
66532 + case 'a':
66533 case 'B':
66534 return symbol_string(buf, end, ptr, spec, *fmt);
66535 case 'R':
66536 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
66537 typeof(type) value; \
66538 if (sizeof(type) == 8) { \
66539 args = PTR_ALIGN(args, sizeof(u32)); \
66540 - *(u32 *)&value = *(u32 *)args; \
66541 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66542 + *(u32 *)&value = *(const u32 *)args; \
66543 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66544 } else { \
66545 args = PTR_ALIGN(args, sizeof(type)); \
66546 - value = *(typeof(type) *)args; \
66547 + value = *(const typeof(type) *)args; \
66548 } \
66549 args += sizeof(type); \
66550 value; \
66551 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
66552 case FORMAT_TYPE_STR: {
66553 const char *str_arg = args;
66554 args += strlen(str_arg) + 1;
66555 - str = string(str, end, (char *)str_arg, spec);
66556 + str = string(str, end, str_arg, spec);
66557 break;
66558 }
66559
66560 diff -urNp linux-3.0.7/localversion-grsec linux-3.0.7/localversion-grsec
66561 --- linux-3.0.7/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66562 +++ linux-3.0.7/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
66563 @@ -0,0 +1 @@
66564 +-grsec
66565 diff -urNp linux-3.0.7/Makefile linux-3.0.7/Makefile
66566 --- linux-3.0.7/Makefile 2011-10-17 23:17:08.000000000 -0400
66567 +++ linux-3.0.7/Makefile 2011-10-17 23:17:19.000000000 -0400
66568 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66569
66570 HOSTCC = gcc
66571 HOSTCXX = g++
66572 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66573 -HOSTCXXFLAGS = -O2
66574 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66575 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66576 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66577
66578 # Decide whether to build built-in, modular, or both.
66579 # Normally, just do built-in.
66580 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
66581 KBUILD_CPPFLAGS := -D__KERNEL__
66582
66583 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66584 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66585 -fno-strict-aliasing -fno-common \
66586 -Werror-implicit-function-declaration \
66587 -Wno-format-security \
66588 -fno-delete-null-pointer-checks
66589 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66590 KBUILD_AFLAGS_KERNEL :=
66591 KBUILD_CFLAGS_KERNEL :=
66592 KBUILD_AFLAGS := -D__ASSEMBLY__
66593 @@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66594 # Rules shared between *config targets and build targets
66595
66596 # Basic helpers built in scripts/
66597 -PHONY += scripts_basic
66598 -scripts_basic:
66599 +PHONY += scripts_basic gcc-plugins
66600 +scripts_basic: gcc-plugins
66601 $(Q)$(MAKE) $(build)=scripts/basic
66602 $(Q)rm -f .tmp_quiet_recordmcount
66603
66604 @@ -564,6 +567,36 @@ else
66605 KBUILD_CFLAGS += -O2
66606 endif
66607
66608 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
66609 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66610 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
66611 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66612 +endif
66613 +ifdef CONFIG_KALLOCSTAT_PLUGIN
66614 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
66615 +endif
66616 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
66617 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
66618 +endif
66619 +ifdef CONFIG_CHECKER_PLUGIN
66620 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
66621 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
66622 +endif
66623 +endif
66624 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
66625 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
66626 +gcc-plugins:
66627 + $(Q)$(MAKE) $(build)=tools/gcc
66628 +else
66629 +gcc-plugins:
66630 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66631 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
66632 +else
66633 + $(Q)echo "warning: your gcc version does not support plugins; you should upgrade it to gcc 4.5 or newer"
66634 +endif
66635 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66636 +endif
66637 +
66638 include $(srctree)/arch/$(SRCARCH)/Makefile
66639
66640 ifneq ($(CONFIG_FRAME_WARN),0)
66641 @@ -708,7 +741,7 @@ export mod_strip_cmd
66642
66643
66644 ifeq ($(KBUILD_EXTMOD),)
66645 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66646 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66647
66648 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66649 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66650 @@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
66651
66652 # The actual objects are generated when descending,
66653 # make sure no implicit rule kicks in
66654 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
66655 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
66656
66657 # Handle descending into subdirectories listed in $(vmlinux-dirs)
66658 @@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
66659 # Error messages still appears in the original language
66660
66661 PHONY += $(vmlinux-dirs)
66662 -$(vmlinux-dirs): prepare scripts
66663 +$(vmlinux-dirs): gcc-plugins prepare scripts
66664 $(Q)$(MAKE) $(build)=$@
66665
66666 # Store (new) KERNELRELASE string in include/config/kernel.release
66667 @@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
66668 $(Q)$(MAKE) $(build)=. missing-syscalls
66669
66670 # All the preparing..
66671 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
66672 prepare: prepare0
66673
66674 # Generate some files
66675 @@ -1087,6 +1122,7 @@ all: modules
66676 # using awk while concatenating to the final file.
66677
66678 PHONY += modules
66679 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66680 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
66681 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66682 @$(kecho) ' Building modules, stage 2.';
66683 @@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
66684
66685 # Target to prepare building external modules
66686 PHONY += modules_prepare
66687 -modules_prepare: prepare scripts
66688 +modules_prepare: gcc-plugins prepare scripts
66689
66690 # Target to install modules
66691 PHONY += modules_install
66692 @@ -1198,7 +1234,7 @@ distclean: mrproper
66693 @find $(srctree) $(RCS_FIND_IGNORE) \
66694 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66695 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66696 - -o -name '.*.rej' -o -size 0 \
66697 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
66698 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66699 -type f -print | xargs rm -f
66700
66701 @@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
66702 $(module-dirs): crmodverdir $(objtree)/Module.symvers
66703 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
66704
66705 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66706 modules: $(module-dirs)
66707 @$(kecho) ' Building modules, stage 2.';
66708 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
66709 @@ -1485,17 +1522,19 @@ else
66710 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
66711 endif
66712
66713 -%.s: %.c prepare scripts FORCE
66714 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
66715 +%.s: %.c gcc-plugins prepare scripts FORCE
66716 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66717 %.i: %.c prepare scripts FORCE
66718 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66719 -%.o: %.c prepare scripts FORCE
66720 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
66721 +%.o: %.c gcc-plugins prepare scripts FORCE
66722 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66723 %.lst: %.c prepare scripts FORCE
66724 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66725 -%.s: %.S prepare scripts FORCE
66726 +%.s: %.S gcc-plugins prepare scripts FORCE
66727 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66728 -%.o: %.S prepare scripts FORCE
66729 +%.o: %.S gcc-plugins prepare scripts FORCE
66730 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66731 %.symtypes: %.c prepare scripts FORCE
66732 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66733 @@ -1505,11 +1544,13 @@ endif
66734 $(cmd_crmodverdir)
66735 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66736 $(build)=$(build-dir)
66737 -%/: prepare scripts FORCE
66738 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
66739 +%/: gcc-plugins prepare scripts FORCE
66740 $(cmd_crmodverdir)
66741 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66742 $(build)=$(build-dir)
66743 -%.ko: prepare scripts FORCE
66744 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
66745 +%.ko: gcc-plugins prepare scripts FORCE
66746 $(cmd_crmodverdir)
66747 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66748 $(build)=$(build-dir) $(@:.ko=.o)
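
Among the HOSTCFLAGS/HOSTCXXFLAGS additions in the Makefile hunks above is -fno-delete-null-pointer-checks. The flag matters because once a pointer has been dereferenced, GCC may treat a later NULL test on it as dead code and silently drop it. A small user-space illustration of the pattern the flag preserves (struct and function names are made up for the demo):

    #include <stdio.h>

    struct dev { int id; };

    static int get_id(struct dev *d)
    {
        int id = d->id;      /* dereference happens first ...                   */
        if (d == NULL)       /* ... so the optimizer may assume d != NULL and
                                delete this check unless the build uses
                                -fno-delete-null-pointer-checks                 */
            return -1;
        return id;
    }

    int main(void)
    {
        struct dev d = { 42 };
        printf("%d\n", get_id(&d));
        return 0;
    }
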
66749 diff -urNp linux-3.0.7/mm/filemap.c linux-3.0.7/mm/filemap.c
66750 --- linux-3.0.7/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
66751 +++ linux-3.0.7/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
66752 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
66753 struct address_space *mapping = file->f_mapping;
66754
66755 if (!mapping->a_ops->readpage)
66756 - return -ENOEXEC;
66757 + return -ENODEV;
66758 file_accessed(file);
66759 vma->vm_ops = &generic_file_vm_ops;
66760 vma->vm_flags |= VM_CAN_NONLINEAR;
66761 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
66762 *pos = i_size_read(inode);
66763
66764 if (limit != RLIM_INFINITY) {
66765 + gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
66766 if (*pos >= limit) {
66767 send_sig(SIGXFSZ, current, 0);
66768 return -EFBIG;
66769 diff -urNp linux-3.0.7/mm/fremap.c linux-3.0.7/mm/fremap.c
66770 --- linux-3.0.7/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
66771 +++ linux-3.0.7/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
66772 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66773 retry:
66774 vma = find_vma(mm, start);
66775
66776 +#ifdef CONFIG_PAX_SEGMEXEC
66777 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66778 + goto out;
66779 +#endif
66780 +
66781 /*
66782 * Make sure the vma is shared, that it supports prefaulting,
66783 * and that the remapped range is valid and fully within
66784 diff -urNp linux-3.0.7/mm/highmem.c linux-3.0.7/mm/highmem.c
66785 --- linux-3.0.7/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
66786 +++ linux-3.0.7/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
66787 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
66788 * So no dangers, even with speculative execution.
66789 */
66790 page = pte_page(pkmap_page_table[i]);
66791 + pax_open_kernel();
66792 pte_clear(&init_mm, (unsigned long)page_address(page),
66793 &pkmap_page_table[i]);
66794 -
66795 + pax_close_kernel();
66796 set_page_address(page, NULL);
66797 need_flush = 1;
66798 }
66799 @@ -186,9 +187,11 @@ start:
66800 }
66801 }
66802 vaddr = PKMAP_ADDR(last_pkmap_nr);
66803 +
66804 + pax_open_kernel();
66805 set_pte_at(&init_mm, vaddr,
66806 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66807 -
66808 + pax_close_kernel();
66809 pkmap_count[last_pkmap_nr] = 1;
66810 set_page_address(page, (void *)vaddr);
66811
66812 diff -urNp linux-3.0.7/mm/huge_memory.c linux-3.0.7/mm/huge_memory.c
66813 --- linux-3.0.7/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
66814 +++ linux-3.0.7/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
66815 @@ -702,7 +702,7 @@ out:
66816 * run pte_offset_map on the pmd, if an huge pmd could
66817 * materialize from under us from a different thread.
66818 */
66819 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
66820 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66821 return VM_FAULT_OOM;
66822 /* if an huge pmd materialized from under us just retry later */
66823 if (unlikely(pmd_trans_huge(*pmd)))
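
The final huge_memory.c hunk makes the fault path call __pte_alloc() only when pmd_none(*pmd) reports that the slot is still empty, instead of unconditionally. A user-space analog of that check-before-allocate guard, with invented names — it is not the kernel code itself:

    #include <stdio.h>
    #include <stdlib.h>

    static long *slot;              /* stands in for the pmd entry */

    static int ensure_slot(void)    /* stands in for the gated __pte_alloc() */
    {
        if (slot == NULL) {         /* the added pmd_none(*pmd) test */
            long *p = calloc(1, sizeof(*p));
            if (p == NULL)
                return -1;          /* the VM_FAULT_OOM path */
            slot = p;               /* the kernel installs this under a lock */
        }
        return 0;                   /* already populated: nothing to allocate */
    }

    int main(void)
    {
        if (ensure_slot() == 0 && ensure_slot() == 0)   /* second call is a no-op */
            printf("slot ready at %p\n", (void *)slot);
        free(slot);
        return 0;
    }
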
66824 diff -urNp linux-3.0.7/mm/hugetlb.c linux-3.0.7/mm/hugetlb.c
66825 --- linux-3.0.7/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
66826 +++ linux-3.0.7/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
66827 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
66828 return 1;
66829 }
66830
66831 +#ifdef CONFIG_PAX_SEGMEXEC
66832 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66833 +{
66834 + struct mm_struct *mm = vma->vm_mm;
66835 + struct vm_area_struct *vma_m;
66836 + unsigned long address_m;
66837 + pte_t *ptep_m;
66838 +
66839 + vma_m = pax_find_mirror_vma(vma);
66840 + if (!vma_m)
66841 + return;
66842 +
66843 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66844 + address_m = address + SEGMEXEC_TASK_SIZE;
66845 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66846 + get_page(page_m);
66847 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
66848 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66849 +}
66850 +#endif
66851 +
66852 /*
66853 * Hugetlb_cow() should be called with page lock of the original hugepage held.
66854 */
66855 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
66856 make_huge_pte(vma, new_page, 1));
66857 page_remove_rmap(old_page);
66858 hugepage_add_new_anon_rmap(new_page, vma, address);
66859 +
66860 +#ifdef CONFIG_PAX_SEGMEXEC
66861 + pax_mirror_huge_pte(vma, address, new_page);
66862 +#endif
66863 +
66864 /* Make the old page be freed below */
66865 new_page = old_page;
66866 mmu_notifier_invalidate_range_end(mm,
66867 @@ -2591,6 +2617,10 @@ retry:
66868 && (vma->vm_flags & VM_SHARED)));
66869 set_huge_pte_at(mm, address, ptep, new_pte);
66870
66871 +#ifdef CONFIG_PAX_SEGMEXEC
66872 + pax_mirror_huge_pte(vma, address, page);
66873 +#endif
66874 +
66875 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
66876 /* Optimization, do the COW without a second fault */
66877 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
66878 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
66879 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
66880 struct hstate *h = hstate_vma(vma);
66881
66882 +#ifdef CONFIG_PAX_SEGMEXEC
66883 + struct vm_area_struct *vma_m;
66884 +#endif
66885 +
66886 ptep = huge_pte_offset(mm, address);
66887 if (ptep) {
66888 entry = huge_ptep_get(ptep);
66889 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
66890 VM_FAULT_SET_HINDEX(h - hstates);
66891 }
66892
66893 +#ifdef CONFIG_PAX_SEGMEXEC
66894 + vma_m = pax_find_mirror_vma(vma);
66895 + if (vma_m) {
66896 + unsigned long address_m;
66897 +
66898 + if (vma->vm_start > vma_m->vm_start) {
66899 + address_m = address;
66900 + address -= SEGMEXEC_TASK_SIZE;
66901 + vma = vma_m;
66902 + h = hstate_vma(vma);
66903 + } else
66904 + address_m = address + SEGMEXEC_TASK_SIZE;
66905 +
66906 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
66907 + return VM_FAULT_OOM;
66908 + address_m &= HPAGE_MASK;
66909 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
66910 + }
66911 +#endif
66912 +
66913 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
66914 if (!ptep)
66915 return VM_FAULT_OOM;
66916 diff -urNp linux-3.0.7/mm/internal.h linux-3.0.7/mm/internal.h
66917 --- linux-3.0.7/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
66918 +++ linux-3.0.7/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
66919 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
66920 * in mm/page_alloc.c
66921 */
66922 extern void __free_pages_bootmem(struct page *page, unsigned int order);
66923 +extern void free_compound_page(struct page *page);
66924 extern void prep_compound_page(struct page *page, unsigned long order);
66925 #ifdef CONFIG_MEMORY_FAILURE
66926 extern bool is_free_buddy_page(struct page *page);
66927 diff -urNp linux-3.0.7/mm/Kconfig linux-3.0.7/mm/Kconfig
66928 --- linux-3.0.7/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
66929 +++ linux-3.0.7/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
66930 @@ -240,7 +240,7 @@ config KSM
66931 config DEFAULT_MMAP_MIN_ADDR
66932 int "Low address space to protect from user allocation"
66933 depends on MMU
66934 - default 4096
66935 + default 65536
66936 help
66937 This is the portion of low virtual memory which should be protected
66938 from userspace allocation. Keeping a user from writing to low pages
66939 diff -urNp linux-3.0.7/mm/kmemleak.c linux-3.0.7/mm/kmemleak.c
66940 --- linux-3.0.7/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
66941 +++ linux-3.0.7/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
66942 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
66943
66944 for (i = 0; i < object->trace_len; i++) {
66945 void *ptr = (void *)object->trace[i];
66946 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
66947 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
66948 }
66949 }
66950
66951 diff -urNp linux-3.0.7/mm/maccess.c linux-3.0.7/mm/maccess.c
66952 --- linux-3.0.7/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
66953 +++ linux-3.0.7/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
66954 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
66955 set_fs(KERNEL_DS);
66956 pagefault_disable();
66957 ret = __copy_from_user_inatomic(dst,
66958 - (__force const void __user *)src, size);
66959 + (const void __force_user *)src, size);
66960 pagefault_enable();
66961 set_fs(old_fs);
66962
66963 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
66964
66965 set_fs(KERNEL_DS);
66966 pagefault_disable();
66967 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
66968 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
66969 pagefault_enable();
66970 set_fs(old_fs);
66971
66972 diff -urNp linux-3.0.7/mm/madvise.c linux-3.0.7/mm/madvise.c
66973 --- linux-3.0.7/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
66974 +++ linux-3.0.7/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
66975 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
66976 pgoff_t pgoff;
66977 unsigned long new_flags = vma->vm_flags;
66978
66979 +#ifdef CONFIG_PAX_SEGMEXEC
66980 + struct vm_area_struct *vma_m;
66981 +#endif
66982 +
66983 switch (behavior) {
66984 case MADV_NORMAL:
66985 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
66986 @@ -110,6 +114,13 @@ success:
66987 /*
66988 * vm_flags is protected by the mmap_sem held in write mode.
66989 */
66990 +
66991 +#ifdef CONFIG_PAX_SEGMEXEC
66992 + vma_m = pax_find_mirror_vma(vma);
66993 + if (vma_m)
66994 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
66995 +#endif
66996 +
66997 vma->vm_flags = new_flags;
66998
66999 out:
67000 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67001 struct vm_area_struct ** prev,
67002 unsigned long start, unsigned long end)
67003 {
67004 +
67005 +#ifdef CONFIG_PAX_SEGMEXEC
67006 + struct vm_area_struct *vma_m;
67007 +#endif
67008 +
67009 *prev = vma;
67010 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67011 return -EINVAL;
67012 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67013 zap_page_range(vma, start, end - start, &details);
67014 } else
67015 zap_page_range(vma, start, end - start, NULL);
67016 +
67017 +#ifdef CONFIG_PAX_SEGMEXEC
67018 + vma_m = pax_find_mirror_vma(vma);
67019 + if (vma_m) {
67020 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67021 + struct zap_details details = {
67022 + .nonlinear_vma = vma_m,
67023 + .last_index = ULONG_MAX,
67024 + };
67025 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67026 + } else
67027 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67028 + }
67029 +#endif
67030 +
67031 return 0;
67032 }
67033
67034 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67035 if (end < start)
67036 goto out;
67037
67038 +#ifdef CONFIG_PAX_SEGMEXEC
67039 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67040 + if (end > SEGMEXEC_TASK_SIZE)
67041 + goto out;
67042 + } else
67043 +#endif
67044 +
67045 + if (end > TASK_SIZE)
67046 + goto out;
67047 +
67048 error = 0;
67049 if (end == start)
67050 goto out;
67051 diff -urNp linux-3.0.7/mm/memory.c linux-3.0.7/mm/memory.c
67052 --- linux-3.0.7/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
67053 +++ linux-3.0.7/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
67054 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67055 return;
67056
67057 pmd = pmd_offset(pud, start);
67058 +
67059 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67060 pud_clear(pud);
67061 pmd_free_tlb(tlb, pmd, start);
67062 +#endif
67063 +
67064 }
67065
67066 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67067 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67068 if (end - 1 > ceiling - 1)
67069 return;
67070
67071 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67072 pud = pud_offset(pgd, start);
67073 pgd_clear(pgd);
67074 pud_free_tlb(tlb, pud, start);
67075 +#endif
67076 +
67077 }
67078
67079 /*
67080 @@ -1577,12 +1584,6 @@ no_page_table:
67081 return page;
67082 }
67083
67084 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67085 -{
67086 - return stack_guard_page_start(vma, addr) ||
67087 - stack_guard_page_end(vma, addr+PAGE_SIZE);
67088 -}
67089 -
67090 /**
67091 * __get_user_pages() - pin user pages in memory
67092 * @tsk: task_struct of target task
67093 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
67094 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67095 i = 0;
67096
67097 - do {
67098 + while (nr_pages) {
67099 struct vm_area_struct *vma;
67100
67101 - vma = find_extend_vma(mm, start);
67102 + vma = find_vma(mm, start);
67103 if (!vma && in_gate_area(mm, start)) {
67104 unsigned long pg = start & PAGE_MASK;
67105 pgd_t *pgd;
67106 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
67107 goto next_page;
67108 }
67109
67110 - if (!vma ||
67111 + if (!vma || start < vma->vm_start ||
67112 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67113 !(vm_flags & vma->vm_flags))
67114 return i ? : -EFAULT;
67115 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
67116 int ret;
67117 unsigned int fault_flags = 0;
67118
67119 - /* For mlock, just skip the stack guard page. */
67120 - if (foll_flags & FOLL_MLOCK) {
67121 - if (stack_guard_page(vma, start))
67122 - goto next_page;
67123 - }
67124 if (foll_flags & FOLL_WRITE)
67125 fault_flags |= FAULT_FLAG_WRITE;
67126 if (nonblocking)
67127 @@ -1811,7 +1807,7 @@ next_page:
67128 start += PAGE_SIZE;
67129 nr_pages--;
67130 } while (nr_pages && start < vma->vm_end);
67131 - } while (nr_pages);
67132 + }
67133 return i;
67134 }
67135 EXPORT_SYMBOL(__get_user_pages);
67136 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
67137 page_add_file_rmap(page);
67138 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67139
67140 +#ifdef CONFIG_PAX_SEGMEXEC
67141 + pax_mirror_file_pte(vma, addr, page, ptl);
67142 +#endif
67143 +
67144 retval = 0;
67145 pte_unmap_unlock(pte, ptl);
67146 return retval;
67147 @@ -2052,10 +2052,22 @@ out:
67148 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67149 struct page *page)
67150 {
67151 +
67152 +#ifdef CONFIG_PAX_SEGMEXEC
67153 + struct vm_area_struct *vma_m;
67154 +#endif
67155 +
67156 if (addr < vma->vm_start || addr >= vma->vm_end)
67157 return -EFAULT;
67158 if (!page_count(page))
67159 return -EINVAL;
67160 +
67161 +#ifdef CONFIG_PAX_SEGMEXEC
67162 + vma_m = pax_find_mirror_vma(vma);
67163 + if (vma_m)
67164 + vma_m->vm_flags |= VM_INSERTPAGE;
67165 +#endif
67166 +
67167 vma->vm_flags |= VM_INSERTPAGE;
67168 return insert_page(vma, addr, page, vma->vm_page_prot);
67169 }
67170 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
67171 unsigned long pfn)
67172 {
67173 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67174 + BUG_ON(vma->vm_mirror);
67175
67176 if (addr < vma->vm_start || addr >= vma->vm_end)
67177 return -EFAULT;
67178 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
67179 copy_user_highpage(dst, src, va, vma);
67180 }
67181
67182 +#ifdef CONFIG_PAX_SEGMEXEC
67183 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67184 +{
67185 + struct mm_struct *mm = vma->vm_mm;
67186 + spinlock_t *ptl;
67187 + pte_t *pte, entry;
67188 +
67189 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67190 + entry = *pte;
67191 + if (!pte_present(entry)) {
67192 + if (!pte_none(entry)) {
67193 + BUG_ON(pte_file(entry));
67194 + free_swap_and_cache(pte_to_swp_entry(entry));
67195 + pte_clear_not_present_full(mm, address, pte, 0);
67196 + }
67197 + } else {
67198 + struct page *page;
67199 +
67200 + flush_cache_page(vma, address, pte_pfn(entry));
67201 + entry = ptep_clear_flush(vma, address, pte);
67202 + BUG_ON(pte_dirty(entry));
67203 + page = vm_normal_page(vma, address, entry);
67204 + if (page) {
67205 + update_hiwater_rss(mm);
67206 + if (PageAnon(page))
67207 + dec_mm_counter_fast(mm, MM_ANONPAGES);
67208 + else
67209 + dec_mm_counter_fast(mm, MM_FILEPAGES);
67210 + page_remove_rmap(page);
67211 + page_cache_release(page);
67212 + }
67213 + }
67214 + pte_unmap_unlock(pte, ptl);
67215 +}
67216 +
67217 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67218 + *
67219 + * the ptl of the lower mapped page is held on entry and is not released on exit
67220 + * or inside this function, so that changes to the PTE state (swapout, mremap, munmap, etc.) remain atomic
67221 + */
67222 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67223 +{
67224 + struct mm_struct *mm = vma->vm_mm;
67225 + unsigned long address_m;
67226 + spinlock_t *ptl_m;
67227 + struct vm_area_struct *vma_m;
67228 + pmd_t *pmd_m;
67229 + pte_t *pte_m, entry_m;
67230 +
67231 + BUG_ON(!page_m || !PageAnon(page_m));
67232 +
67233 + vma_m = pax_find_mirror_vma(vma);
67234 + if (!vma_m)
67235 + return;
67236 +
67237 + BUG_ON(!PageLocked(page_m));
67238 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67239 + address_m = address + SEGMEXEC_TASK_SIZE;
67240 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67241 + pte_m = pte_offset_map(pmd_m, address_m);
67242 + ptl_m = pte_lockptr(mm, pmd_m);
67243 + if (ptl != ptl_m) {
67244 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67245 + if (!pte_none(*pte_m))
67246 + goto out;
67247 + }
67248 +
67249 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67250 + page_cache_get(page_m);
67251 + page_add_anon_rmap(page_m, vma_m, address_m);
67252 + inc_mm_counter_fast(mm, MM_ANONPAGES);
67253 + set_pte_at(mm, address_m, pte_m, entry_m);
67254 + update_mmu_cache(vma_m, address_m, entry_m);
67255 +out:
67256 + if (ptl != ptl_m)
67257 + spin_unlock(ptl_m);
67258 + pte_unmap(pte_m);
67259 + unlock_page(page_m);
67260 +}
67261 +
67262 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67263 +{
67264 + struct mm_struct *mm = vma->vm_mm;
67265 + unsigned long address_m;
67266 + spinlock_t *ptl_m;
67267 + struct vm_area_struct *vma_m;
67268 + pmd_t *pmd_m;
67269 + pte_t *pte_m, entry_m;
67270 +
67271 + BUG_ON(!page_m || PageAnon(page_m));
67272 +
67273 + vma_m = pax_find_mirror_vma(vma);
67274 + if (!vma_m)
67275 + return;
67276 +
67277 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67278 + address_m = address + SEGMEXEC_TASK_SIZE;
67279 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67280 + pte_m = pte_offset_map(pmd_m, address_m);
67281 + ptl_m = pte_lockptr(mm, pmd_m);
67282 + if (ptl != ptl_m) {
67283 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67284 + if (!pte_none(*pte_m))
67285 + goto out;
67286 + }
67287 +
67288 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67289 + page_cache_get(page_m);
67290 + page_add_file_rmap(page_m);
67291 + inc_mm_counter_fast(mm, MM_FILEPAGES);
67292 + set_pte_at(mm, address_m, pte_m, entry_m);
67293 + update_mmu_cache(vma_m, address_m, entry_m);
67294 +out:
67295 + if (ptl != ptl_m)
67296 + spin_unlock(ptl_m);
67297 + pte_unmap(pte_m);
67298 +}
67299 +
67300 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67301 +{
67302 + struct mm_struct *mm = vma->vm_mm;
67303 + unsigned long address_m;
67304 + spinlock_t *ptl_m;
67305 + struct vm_area_struct *vma_m;
67306 + pmd_t *pmd_m;
67307 + pte_t *pte_m, entry_m;
67308 +
67309 + vma_m = pax_find_mirror_vma(vma);
67310 + if (!vma_m)
67311 + return;
67312 +
67313 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67314 + address_m = address + SEGMEXEC_TASK_SIZE;
67315 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67316 + pte_m = pte_offset_map(pmd_m, address_m);
67317 + ptl_m = pte_lockptr(mm, pmd_m);
67318 + if (ptl != ptl_m) {
67319 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67320 + if (!pte_none(*pte_m))
67321 + goto out;
67322 + }
67323 +
67324 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67325 + set_pte_at(mm, address_m, pte_m, entry_m);
67326 +out:
67327 + if (ptl != ptl_m)
67328 + spin_unlock(ptl_m);
67329 + pte_unmap(pte_m);
67330 +}
67331 +
67332 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67333 +{
67334 + struct page *page_m;
67335 + pte_t entry;
67336 +
67337 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67338 + goto out;
67339 +
67340 + entry = *pte;
67341 + page_m = vm_normal_page(vma, address, entry);
67342 + if (!page_m)
67343 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67344 + else if (PageAnon(page_m)) {
67345 + if (pax_find_mirror_vma(vma)) {
67346 + pte_unmap_unlock(pte, ptl);
67347 + lock_page(page_m);
67348 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67349 + if (pte_same(entry, *pte))
67350 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67351 + else
67352 + unlock_page(page_m);
67353 + }
67354 + } else
67355 + pax_mirror_file_pte(vma, address, page_m, ptl);
67356 +
67357 +out:
67358 + pte_unmap_unlock(pte, ptl);
67359 +}
67360 +#endif
67361 +
67362 /*
67363 * This routine handles present pages, when users try to write
67364 * to a shared page. It is done by copying the page to a new address
67365 @@ -2667,6 +2860,12 @@ gotten:
67366 */
67367 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67368 if (likely(pte_same(*page_table, orig_pte))) {
67369 +
67370 +#ifdef CONFIG_PAX_SEGMEXEC
67371 + if (pax_find_mirror_vma(vma))
67372 + BUG_ON(!trylock_page(new_page));
67373 +#endif
67374 +
67375 if (old_page) {
67376 if (!PageAnon(old_page)) {
67377 dec_mm_counter_fast(mm, MM_FILEPAGES);
67378 @@ -2718,6 +2917,10 @@ gotten:
67379 page_remove_rmap(old_page);
67380 }
67381
67382 +#ifdef CONFIG_PAX_SEGMEXEC
67383 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67384 +#endif
67385 +
67386 /* Free the old page.. */
67387 new_page = old_page;
67388 ret |= VM_FAULT_WRITE;
67389 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
67390 swap_free(entry);
67391 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67392 try_to_free_swap(page);
67393 +
67394 +#ifdef CONFIG_PAX_SEGMEXEC
67395 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67396 +#endif
67397 +
67398 unlock_page(page);
67399 if (swapcache) {
67400 /*
67401 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
67402
67403 /* No need to invalidate - it was non-present before */
67404 update_mmu_cache(vma, address, page_table);
67405 +
67406 +#ifdef CONFIG_PAX_SEGMEXEC
67407 + pax_mirror_anon_pte(vma, address, page, ptl);
67408 +#endif
67409 +
67410 unlock:
67411 pte_unmap_unlock(page_table, ptl);
67412 out:
67413 @@ -3039,40 +3252,6 @@ out_release:
67414 }
67415
67416 /*
67417 - * This is like a special single-page "expand_{down|up}wards()",
67418 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67419 - * doesn't hit another vma.
67420 - */
67421 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67422 -{
67423 - address &= PAGE_MASK;
67424 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67425 - struct vm_area_struct *prev = vma->vm_prev;
67426 -
67427 - /*
67428 - * Is there a mapping abutting this one below?
67429 - *
67430 - * That's only ok if it's the same stack mapping
67431 - * that has gotten split..
67432 - */
67433 - if (prev && prev->vm_end == address)
67434 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67435 -
67436 - expand_downwards(vma, address - PAGE_SIZE);
67437 - }
67438 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67439 - struct vm_area_struct *next = vma->vm_next;
67440 -
67441 - /* As VM_GROWSDOWN but s/below/above/ */
67442 - if (next && next->vm_start == address + PAGE_SIZE)
67443 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67444 -
67445 - expand_upwards(vma, address + PAGE_SIZE);
67446 - }
67447 - return 0;
67448 -}
67449 -
67450 -/*
67451 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67452 * but allow concurrent faults), and pte mapped but not yet locked.
67453 * We return with mmap_sem still held, but pte unmapped and unlocked.
67454 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
67455 unsigned long address, pte_t *page_table, pmd_t *pmd,
67456 unsigned int flags)
67457 {
67458 - struct page *page;
67459 + struct page *page = NULL;
67460 spinlock_t *ptl;
67461 pte_t entry;
67462
67463 - pte_unmap(page_table);
67464 -
67465 - /* Check if we need to add a guard page to the stack */
67466 - if (check_stack_guard_page(vma, address) < 0)
67467 - return VM_FAULT_SIGBUS;
67468 -
67469 - /* Use the zero-page for reads */
67470 if (!(flags & FAULT_FLAG_WRITE)) {
67471 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67472 vma->vm_page_prot));
67473 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67474 + ptl = pte_lockptr(mm, pmd);
67475 + spin_lock(ptl);
67476 if (!pte_none(*page_table))
67477 goto unlock;
67478 goto setpte;
67479 }
67480
67481 /* Allocate our own private page. */
67482 + pte_unmap(page_table);
67483 +
67484 if (unlikely(anon_vma_prepare(vma)))
67485 goto oom;
67486 page = alloc_zeroed_user_highpage_movable(vma, address);
67487 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
67488 if (!pte_none(*page_table))
67489 goto release;
67490
67491 +#ifdef CONFIG_PAX_SEGMEXEC
67492 + if (pax_find_mirror_vma(vma))
67493 + BUG_ON(!trylock_page(page));
67494 +#endif
67495 +
67496 inc_mm_counter_fast(mm, MM_ANONPAGES);
67497 page_add_new_anon_rmap(page, vma, address);
67498 setpte:
67499 @@ -3127,6 +3307,12 @@ setpte:
67500
67501 /* No need to invalidate - it was non-present before */
67502 update_mmu_cache(vma, address, page_table);
67503 +
67504 +#ifdef CONFIG_PAX_SEGMEXEC
67505 + if (page)
67506 + pax_mirror_anon_pte(vma, address, page, ptl);
67507 +#endif
67508 +
67509 unlock:
67510 pte_unmap_unlock(page_table, ptl);
67511 return 0;
67512 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
67513 */
67514 /* Only go through if we didn't race with anybody else... */
67515 if (likely(pte_same(*page_table, orig_pte))) {
67516 +
67517 +#ifdef CONFIG_PAX_SEGMEXEC
67518 + if (anon && pax_find_mirror_vma(vma))
67519 + BUG_ON(!trylock_page(page));
67520 +#endif
67521 +
67522 flush_icache_page(vma, page);
67523 entry = mk_pte(page, vma->vm_page_prot);
67524 if (flags & FAULT_FLAG_WRITE)
67525 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
67526
67527 /* no need to invalidate: a not-present page won't be cached */
67528 update_mmu_cache(vma, address, page_table);
67529 +
67530 +#ifdef CONFIG_PAX_SEGMEXEC
67531 + if (anon)
67532 + pax_mirror_anon_pte(vma, address, page, ptl);
67533 + else
67534 + pax_mirror_file_pte(vma, address, page, ptl);
67535 +#endif
67536 +
67537 } else {
67538 if (charged)
67539 mem_cgroup_uncharge_page(page);
67540 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
67541 if (flags & FAULT_FLAG_WRITE)
67542 flush_tlb_fix_spurious_fault(vma, address);
67543 }
67544 +
67545 +#ifdef CONFIG_PAX_SEGMEXEC
67546 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67547 + return 0;
67548 +#endif
67549 +
67550 unlock:
67551 pte_unmap_unlock(pte, ptl);
67552 return 0;
67553 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
67554 pmd_t *pmd;
67555 pte_t *pte;
67556
67557 +#ifdef CONFIG_PAX_SEGMEXEC
67558 + struct vm_area_struct *vma_m;
67559 +#endif
67560 +
67561 __set_current_state(TASK_RUNNING);
67562
67563 count_vm_event(PGFAULT);
67564 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
67565 if (unlikely(is_vm_hugetlb_page(vma)))
67566 return hugetlb_fault(mm, vma, address, flags);
67567
67568 +#ifdef CONFIG_PAX_SEGMEXEC
67569 + vma_m = pax_find_mirror_vma(vma);
67570 + if (vma_m) {
67571 + unsigned long address_m;
67572 + pgd_t *pgd_m;
67573 + pud_t *pud_m;
67574 + pmd_t *pmd_m;
67575 +
67576 + if (vma->vm_start > vma_m->vm_start) {
67577 + address_m = address;
67578 + address -= SEGMEXEC_TASK_SIZE;
67579 + vma = vma_m;
67580 + } else
67581 + address_m = address + SEGMEXEC_TASK_SIZE;
67582 +
67583 + pgd_m = pgd_offset(mm, address_m);
67584 + pud_m = pud_alloc(mm, pgd_m, address_m);
67585 + if (!pud_m)
67586 + return VM_FAULT_OOM;
67587 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67588 + if (!pmd_m)
67589 + return VM_FAULT_OOM;
67590 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67591 + return VM_FAULT_OOM;
67592 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67593 + }
67594 +#endif
67595 +
67596 pgd = pgd_offset(mm, address);
67597 pud = pud_alloc(mm, pgd, address);
67598 if (!pud)
67599 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
67600 * run pte_offset_map on the pmd, if an huge pmd could
67601 * materialize from under us from a different thread.
67602 */
67603 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
67604 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67605 return VM_FAULT_OOM;
67606 /* if an huge pmd materialized from under us just retry later */
67607 if (unlikely(pmd_trans_huge(*pmd)))
67608 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
67609 gate_vma.vm_start = FIXADDR_USER_START;
67610 gate_vma.vm_end = FIXADDR_USER_END;
67611 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67612 - gate_vma.vm_page_prot = __P101;
67613 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67614 /*
67615 * Make sure the vDSO gets into every core dump.
67616 * Dumping its contents makes post-mortem fully interpretable later
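
The mm/memory.c changes above implement the SEGMEXEC mirroring that the rest of the patch leans on: every faulting address in one half of the split address space has a counterpart exactly SEGMEXEC_TASK_SIZE away, and the pax_mirror_*_pte() helpers keep the two PTEs in sync. A toy model of just the address arithmetic — the constant and names below are invented; the real SEGMEXEC_TASK_SIZE is an arch-specific kernel value:

    #include <stdio.h>

    #define DEMO_SEGMEXEC_TASK_SIZE (1UL << 30)   /* pretend the task sees 1 GiB */

    /* Given a faulting address in either half, return the (lower, mirror) pair. */
    static void mirror_pair(unsigned long addr, unsigned long *lo, unsigned long *hi)
    {
        if (addr >= DEMO_SEGMEXEC_TASK_SIZE) {    /* fault hit the mirror half */
            *hi = addr;
            *lo = addr - DEMO_SEGMEXEC_TASK_SIZE;
        } else {                                  /* fault hit the primary half */
            *lo = addr;
            *hi = addr + DEMO_SEGMEXEC_TASK_SIZE;
        }
    }

    int main(void)
    {
        unsigned long lo, hi;
        mirror_pair(0x1234UL, &lo, &hi);
        printf("lower %#lx  mirror %#lx\n", lo, hi);
        return 0;
    }

Everything else in those hunks — the extra pud/pmd allocation for the mirror address, pax_unmap_mirror_pte(), the trylock_page() assertions — exists to keep the mirror PTE consistent while the primary half is being modified.
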
67617 diff -urNp linux-3.0.7/mm/memory-failure.c linux-3.0.7/mm/memory-failure.c
67618 --- linux-3.0.7/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
67619 +++ linux-3.0.7/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
67620 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
67621
67622 int sysctl_memory_failure_recovery __read_mostly = 1;
67623
67624 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67625 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67626
67627 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67628
67629 @@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
67630 si.si_signo = SIGBUS;
67631 si.si_errno = 0;
67632 si.si_code = BUS_MCEERR_AO;
67633 - si.si_addr = (void *)addr;
67634 + si.si_addr = (void __user *)addr;
67635 #ifdef __ARCH_SI_TRAPNO
67636 si.si_trapno = trapno;
67637 #endif
67638 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
67639 }
67640
67641 nr_pages = 1 << compound_trans_order(hpage);
67642 - atomic_long_add(nr_pages, &mce_bad_pages);
67643 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67644
67645 /*
67646 * We need/can do nothing about count=0 pages.
67647 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
67648 if (!PageHWPoison(hpage)
67649 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67650 || (p != hpage && TestSetPageHWPoison(hpage))) {
67651 - atomic_long_sub(nr_pages, &mce_bad_pages);
67652 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67653 return 0;
67654 }
67655 set_page_hwpoison_huge_page(hpage);
67656 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
67657 }
67658 if (hwpoison_filter(p)) {
67659 if (TestClearPageHWPoison(p))
67660 - atomic_long_sub(nr_pages, &mce_bad_pages);
67661 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67662 unlock_page(hpage);
67663 put_page(hpage);
67664 return 0;
67665 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
67666 return 0;
67667 }
67668 if (TestClearPageHWPoison(p))
67669 - atomic_long_sub(nr_pages, &mce_bad_pages);
67670 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67671 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67672 return 0;
67673 }
67674 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
67675 */
67676 if (TestClearPageHWPoison(page)) {
67677 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67678 - atomic_long_sub(nr_pages, &mce_bad_pages);
67679 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67680 freeit = 1;
67681 if (PageHuge(page))
67682 clear_page_hwpoison_huge_page(page);
67683 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
67684 }
67685 done:
67686 if (!PageHWPoison(hpage))
67687 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67688 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67689 set_page_hwpoison_huge_page(hpage);
67690 dequeue_hwpoisoned_huge_page(hpage);
67691 /* keep elevated page count for bad page */
67692 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
67693 return ret;
67694
67695 done:
67696 - atomic_long_add(1, &mce_bad_pages);
67697 + atomic_long_add_unchecked(1, &mce_bad_pages);
67698 SetPageHWPoison(page);
67699 /* keep elevated page count for bad page */
67700 return ret;
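
mce_bad_pages above is converted to atomic_long_unchecked_t: it is a statistics counter rather than a reference count, so it is deliberately exempted from PaX's overflow checking. atomic_long_unchecked_t is a PaX-specific type; the closest portable analog is simply an ordinary atomic counter, sketched here with C11 atomics and a hypothetical variable name:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long bad_pages_demo;          /* analog of mce_bad_pages */

    int main(void)
    {
        atomic_fetch_add(&bad_pages_demo, 4);   /* atomic_long_add_unchecked(4, ...) */
        atomic_fetch_sub(&bad_pages_demo, 1);   /* atomic_long_sub_unchecked(1, ...) */
        printf("bad pages: %ld\n", atomic_load(&bad_pages_demo));
        return 0;
    }
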
67701 diff -urNp linux-3.0.7/mm/mempolicy.c linux-3.0.7/mm/mempolicy.c
67702 --- linux-3.0.7/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
67703 +++ linux-3.0.7/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
67704 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
67705 unsigned long vmstart;
67706 unsigned long vmend;
67707
67708 +#ifdef CONFIG_PAX_SEGMEXEC
67709 + struct vm_area_struct *vma_m;
67710 +#endif
67711 +
67712 vma = find_vma_prev(mm, start, &prev);
67713 if (!vma || vma->vm_start > start)
67714 return -EFAULT;
67715 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
67716 err = policy_vma(vma, new_pol);
67717 if (err)
67718 goto out;
67719 +
67720 +#ifdef CONFIG_PAX_SEGMEXEC
67721 + vma_m = pax_find_mirror_vma(vma);
67722 + if (vma_m) {
67723 + err = policy_vma(vma_m, new_pol);
67724 + if (err)
67725 + goto out;
67726 + }
67727 +#endif
67728 +
67729 }
67730
67731 out:
67732 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
67733
67734 if (end < start)
67735 return -EINVAL;
67736 +
67737 +#ifdef CONFIG_PAX_SEGMEXEC
67738 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67739 + if (end > SEGMEXEC_TASK_SIZE)
67740 + return -EINVAL;
67741 + } else
67742 +#endif
67743 +
67744 + if (end > TASK_SIZE)
67745 + return -EINVAL;
67746 +
67747 if (end == start)
67748 return 0;
67749
67750 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67751 if (!mm)
67752 goto out;
67753
67754 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67755 + if (mm != current->mm &&
67756 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67757 + err = -EPERM;
67758 + goto out;
67759 + }
67760 +#endif
67761 +
67762 /*
67763 * Check if this process has the right to modify the specified
67764 * process. The right exists if the process has administrative
67765 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67766 rcu_read_lock();
67767 tcred = __task_cred(task);
67768 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67769 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67770 - !capable(CAP_SYS_NICE)) {
67771 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67772 rcu_read_unlock();
67773 err = -EPERM;
67774 goto out;
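
The do_mbind() hunk above rejects ranges whose end lies beyond the usable address space, and that ceiling drops to SEGMEXEC_TASK_SIZE when the task runs with the SEGMEXEC split (sys_madvise() gains the same check earlier in the patch). A compact sketch of that bound selection, with demo constants standing in for the real kernel limits:

    #include <stdio.h>
    #include <errno.h>

    #define DEMO_TASK_SIZE          (1UL << 31)
    #define DEMO_SEGMEXEC_TASK_SIZE (DEMO_TASK_SIZE / 2)

    static int check_range(unsigned long start, unsigned long end, int segmexec)
    {
        unsigned long limit = segmexec ? DEMO_SEGMEXEC_TASK_SIZE : DEMO_TASK_SIZE;

        if (end < start)
            return -EINVAL;          /* wrapped or reversed range */
        if (end > limit)
            return -EINVAL;          /* extends past the usable address space */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_range(0x1000, DEMO_TASK_SIZE, 1));   /* rejected under SEGMEXEC */
        printf("%d\n", check_range(0x1000, 0x2000, 0));           /* 0: accepted */
        return 0;
    }
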
67775 diff -urNp linux-3.0.7/mm/migrate.c linux-3.0.7/mm/migrate.c
67776 --- linux-3.0.7/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
67777 +++ linux-3.0.7/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
67778 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
67779 unsigned long chunk_start;
67780 int err;
67781
67782 + pax_track_stack();
67783 +
67784 task_nodes = cpuset_mems_allowed(task);
67785
67786 err = -ENOMEM;
67787 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67788 if (!mm)
67789 return -EINVAL;
67790
67791 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67792 + if (mm != current->mm &&
67793 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67794 + err = -EPERM;
67795 + goto out;
67796 + }
67797 +#endif
67798 +
67799 /*
67800 * Check if this process has the right to modify the specified
67801 * process. The right exists if the process has administrative
67802 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67803 rcu_read_lock();
67804 tcred = __task_cred(task);
67805 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67806 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67807 - !capable(CAP_SYS_NICE)) {
67808 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67809 rcu_read_unlock();
67810 err = -EPERM;
67811 goto out;
67812 diff -urNp linux-3.0.7/mm/mlock.c linux-3.0.7/mm/mlock.c
67813 --- linux-3.0.7/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
67814 +++ linux-3.0.7/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
67815 @@ -13,6 +13,7 @@
67816 #include <linux/pagemap.h>
67817 #include <linux/mempolicy.h>
67818 #include <linux/syscalls.h>
67819 +#include <linux/security.h>
67820 #include <linux/sched.h>
67821 #include <linux/module.h>
67822 #include <linux/rmap.h>
67823 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
67824 return -EINVAL;
67825 if (end == start)
67826 return 0;
67827 + if (end > TASK_SIZE)
67828 + return -EINVAL;
67829 +
67830 vma = find_vma_prev(current->mm, start, &prev);
67831 if (!vma || vma->vm_start > start)
67832 return -ENOMEM;
67833 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
67834 for (nstart = start ; ; ) {
67835 vm_flags_t newflags;
67836
67837 +#ifdef CONFIG_PAX_SEGMEXEC
67838 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67839 + break;
67840 +#endif
67841 +
67842 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67843
67844 newflags = vma->vm_flags | VM_LOCKED;
67845 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67846 lock_limit >>= PAGE_SHIFT;
67847
67848 /* check against resource limits */
67849 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67850 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67851 error = do_mlock(start, len, 1);
67852 up_write(&current->mm->mmap_sem);
67853 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67854 static int do_mlockall(int flags)
67855 {
67856 struct vm_area_struct * vma, * prev = NULL;
67857 - unsigned int def_flags = 0;
67858
67859 if (flags & MCL_FUTURE)
67860 - def_flags = VM_LOCKED;
67861 - current->mm->def_flags = def_flags;
67862 + current->mm->def_flags |= VM_LOCKED;
67863 + else
67864 + current->mm->def_flags &= ~VM_LOCKED;
67865 if (flags == MCL_FUTURE)
67866 goto out;
67867
67868 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67869 vm_flags_t newflags;
67870
67871 +#ifdef CONFIG_PAX_SEGMEXEC
67872 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67873 + break;
67874 +#endif
67875 +
67876 + BUG_ON(vma->vm_end > TASK_SIZE);
67877 newflags = vma->vm_flags | VM_LOCKED;
67878 if (!(flags & MCL_CURRENT))
67879 newflags &= ~VM_LOCKED;
67880 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67881 lock_limit >>= PAGE_SHIFT;
67882
67883 ret = -ENOMEM;
67884 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67885 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67886 capable(CAP_IPC_LOCK))
67887 ret = do_mlockall(flags);
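
The do_mlockall() rewrite above stops overwriting mm->def_flags wholesale and instead sets or clears only the VM_LOCKED bit, so any other default flags survive an mlockall()/munlockall() cycle. A minimal bit-flag illustration; the flag values here are invented for the demo:

    #include <stdio.h>

    #define DEMO_VM_LOCKED  0x2000UL
    #define DEMO_VM_OTHER   0x0001UL   /* some unrelated default flag to preserve */

    int main(void)
    {
        unsigned long def_flags = DEMO_VM_OTHER;

        /* old behaviour: def_flags = MCL_FUTURE ? VM_LOCKED : 0, which loses VM_OTHER */
        /* new behaviour: toggle only the bit this path owns */
        def_flags |= DEMO_VM_LOCKED;            /* MCL_FUTURE requested */
        printf("locked:   %#lx\n", def_flags);

        def_flags &= ~DEMO_VM_LOCKED;           /* future locking switched off */
        printf("unlocked: %#lx\n", def_flags);  /* VM_OTHER is still set */
        return 0;
    }
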
67888 diff -urNp linux-3.0.7/mm/mmap.c linux-3.0.7/mm/mmap.c
67889 --- linux-3.0.7/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
67890 +++ linux-3.0.7/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
67891 @@ -46,6 +46,16 @@
67892 #define arch_rebalance_pgtables(addr, len) (addr)
67893 #endif
67894
67895 +static inline void verify_mm_writelocked(struct mm_struct *mm)
67896 +{
67897 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67898 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67899 + up_read(&mm->mmap_sem);
67900 + BUG();
67901 + }
67902 +#endif
67903 +}
67904 +
67905 static void unmap_region(struct mm_struct *mm,
67906 struct vm_area_struct *vma, struct vm_area_struct *prev,
67907 unsigned long start, unsigned long end);
67908 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
67909 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67910 *
67911 */
67912 -pgprot_t protection_map[16] = {
67913 +pgprot_t protection_map[16] __read_only = {
67914 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67915 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67916 };
67917
67918 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
67919 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
67920 {
67921 - return __pgprot(pgprot_val(protection_map[vm_flags &
67922 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67923 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67924 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67925 +
67926 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67927 + if (!(__supported_pte_mask & _PAGE_NX) &&
67928 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
67929 + (vm_flags & (VM_READ | VM_WRITE)))
67930 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
67931 +#endif
67932 +
67933 + return prot;
67934 }
67935 EXPORT_SYMBOL(vm_get_page_prot);
67936
67937 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
67938 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
67939 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
67940 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
67941 /*
67942 * Make sure vm_committed_as in one cacheline and not cacheline shared with
67943 * other variables. It can be updated by several CPUs frequently.
67944 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
67945 struct vm_area_struct *next = vma->vm_next;
67946
67947 might_sleep();
67948 + BUG_ON(vma->vm_mirror);
67949 if (vma->vm_ops && vma->vm_ops->close)
67950 vma->vm_ops->close(vma);
67951 if (vma->vm_file) {
67952 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
67953 * not page aligned -Ram Gupta
67954 */
67955 rlim = rlimit(RLIMIT_DATA);
67956 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
67957 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
67958 (mm->end_data - mm->start_data) > rlim)
67959 goto out;
67960 @@ -697,6 +719,12 @@ static int
67961 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
67962 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
67963 {
67964 +
67965 +#ifdef CONFIG_PAX_SEGMEXEC
67966 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
67967 + return 0;
67968 +#endif
67969 +
67970 if (is_mergeable_vma(vma, file, vm_flags) &&
67971 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
67972 if (vma->vm_pgoff == vm_pgoff)
67973 @@ -716,6 +744,12 @@ static int
67974 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
67975 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
67976 {
67977 +
67978 +#ifdef CONFIG_PAX_SEGMEXEC
67979 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
67980 + return 0;
67981 +#endif
67982 +
67983 if (is_mergeable_vma(vma, file, vm_flags) &&
67984 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
67985 pgoff_t vm_pglen;
67986 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
67987 struct vm_area_struct *vma_merge(struct mm_struct *mm,
67988 struct vm_area_struct *prev, unsigned long addr,
67989 unsigned long end, unsigned long vm_flags,
67990 - struct anon_vma *anon_vma, struct file *file,
67991 + struct anon_vma *anon_vma, struct file *file,
67992 pgoff_t pgoff, struct mempolicy *policy)
67993 {
67994 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
67995 struct vm_area_struct *area, *next;
67996 int err;
67997
67998 +#ifdef CONFIG_PAX_SEGMEXEC
67999 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68000 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68001 +
68002 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68003 +#endif
68004 +
68005 /*
68006 * We later require that vma->vm_flags == vm_flags,
68007 * so this tests vma->vm_flags & VM_SPECIAL, too.
68008 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
68009 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68010 next = next->vm_next;
68011
68012 +#ifdef CONFIG_PAX_SEGMEXEC
68013 + if (prev)
68014 + prev_m = pax_find_mirror_vma(prev);
68015 + if (area)
68016 + area_m = pax_find_mirror_vma(area);
68017 + if (next)
68018 + next_m = pax_find_mirror_vma(next);
68019 +#endif
68020 +
68021 /*
68022 * Can it merge with the predecessor?
68023 */
68024 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
68025 /* cases 1, 6 */
68026 err = vma_adjust(prev, prev->vm_start,
68027 next->vm_end, prev->vm_pgoff, NULL);
68028 - } else /* cases 2, 5, 7 */
68029 +
68030 +#ifdef CONFIG_PAX_SEGMEXEC
68031 + if (!err && prev_m)
68032 + err = vma_adjust(prev_m, prev_m->vm_start,
68033 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68034 +#endif
68035 +
68036 + } else { /* cases 2, 5, 7 */
68037 err = vma_adjust(prev, prev->vm_start,
68038 end, prev->vm_pgoff, NULL);
68039 +
68040 +#ifdef CONFIG_PAX_SEGMEXEC
68041 + if (!err && prev_m)
68042 + err = vma_adjust(prev_m, prev_m->vm_start,
68043 + end_m, prev_m->vm_pgoff, NULL);
68044 +#endif
68045 +
68046 + }
68047 if (err)
68048 return NULL;
68049 khugepaged_enter_vma_merge(prev);
68050 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
68051 mpol_equal(policy, vma_policy(next)) &&
68052 can_vma_merge_before(next, vm_flags,
68053 anon_vma, file, pgoff+pglen)) {
68054 - if (prev && addr < prev->vm_end) /* case 4 */
68055 + if (prev && addr < prev->vm_end) { /* case 4 */
68056 err = vma_adjust(prev, prev->vm_start,
68057 addr, prev->vm_pgoff, NULL);
68058 - else /* cases 3, 8 */
68059 +
68060 +#ifdef CONFIG_PAX_SEGMEXEC
68061 + if (!err && prev_m)
68062 + err = vma_adjust(prev_m, prev_m->vm_start,
68063 + addr_m, prev_m->vm_pgoff, NULL);
68064 +#endif
68065 +
68066 + } else { /* cases 3, 8 */
68067 err = vma_adjust(area, addr, next->vm_end,
68068 next->vm_pgoff - pglen, NULL);
68069 +
68070 +#ifdef CONFIG_PAX_SEGMEXEC
68071 + if (!err && area_m)
68072 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
68073 + next_m->vm_pgoff - pglen, NULL);
68074 +#endif
68075 +
68076 + }
68077 if (err)
68078 return NULL;
68079 khugepaged_enter_vma_merge(area);
68080 @@ -929,14 +1009,11 @@ none:
68081 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68082 struct file *file, long pages)
68083 {
68084 - const unsigned long stack_flags
68085 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68086 -
68087 if (file) {
68088 mm->shared_vm += pages;
68089 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68090 mm->exec_vm += pages;
68091 - } else if (flags & stack_flags)
68092 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68093 mm->stack_vm += pages;
68094 if (flags & (VM_RESERVED|VM_IO))
68095 mm->reserved_vm += pages;
68096 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
68097 * (the exception is when the underlying filesystem is noexec
68098 * mounted, in which case we dont add PROT_EXEC.)
68099 */
68100 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68101 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68102 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68103 prot |= PROT_EXEC;
68104
68105 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
68106 /* Obtain the address to map to. we verify (or select) it and ensure
68107 * that it represents a valid section of the address space.
68108 */
68109 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68110 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68111 if (addr & ~PAGE_MASK)
68112 return addr;
68113
68114 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
68115 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68116 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68117
68118 +#ifdef CONFIG_PAX_MPROTECT
68119 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68120 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68121 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68122 + gr_log_rwxmmap(file);
68123 +
68124 +#ifdef CONFIG_PAX_EMUPLT
68125 + vm_flags &= ~VM_EXEC;
68126 +#else
68127 + return -EPERM;
68128 +#endif
68129 +
68130 + }
68131 +
68132 + if (!(vm_flags & VM_EXEC))
68133 + vm_flags &= ~VM_MAYEXEC;
68134 +#else
68135 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68136 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68137 +#endif
68138 + else
68139 + vm_flags &= ~VM_MAYWRITE;
68140 + }
68141 +#endif
68142 +
68143 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68144 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68145 + vm_flags &= ~VM_PAGEEXEC;
68146 +#endif
68147 +
68148 if (flags & MAP_LOCKED)
68149 if (!can_do_mlock())
68150 return -EPERM;
68151 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
68152 locked += mm->locked_vm;
68153 lock_limit = rlimit(RLIMIT_MEMLOCK);
68154 lock_limit >>= PAGE_SHIFT;
68155 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68156 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68157 return -EAGAIN;
68158 }
68159 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
68160 if (error)
68161 return error;
68162
68163 + if (!gr_acl_handle_mmap(file, prot))
68164 + return -EACCES;
68165 +
68166 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68167 }
68168 EXPORT_SYMBOL(do_mmap_pgoff);
68169 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
68170 vm_flags_t vm_flags = vma->vm_flags;
68171
68172 /* If it was private or non-writable, the write bit is already clear */
68173 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68174 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68175 return 0;
68176
68177 /* The backer wishes to know when pages are first written to? */
68178 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
68179 unsigned long charged = 0;
68180 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68181
68182 +#ifdef CONFIG_PAX_SEGMEXEC
68183 + struct vm_area_struct *vma_m = NULL;
68184 +#endif
68185 +
68186 + /*
68187 + * mm->mmap_sem is required to protect against another thread
68188 + * changing the mappings in case we sleep.
68189 + */
68190 + verify_mm_writelocked(mm);
68191 +
68192 /* Clear old maps */
68193 error = -ENOMEM;
68194 -munmap_back:
68195 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68196 if (vma && vma->vm_start < addr + len) {
68197 if (do_munmap(mm, addr, len))
68198 return -ENOMEM;
68199 - goto munmap_back;
68200 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68201 + BUG_ON(vma && vma->vm_start < addr + len);
68202 }
68203
68204 /* Check against address space limit. */
68205 @@ -1266,6 +1387,16 @@ munmap_back:
68206 goto unacct_error;
68207 }
68208
68209 +#ifdef CONFIG_PAX_SEGMEXEC
68210 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68211 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68212 + if (!vma_m) {
68213 + error = -ENOMEM;
68214 + goto free_vma;
68215 + }
68216 + }
68217 +#endif
68218 +
68219 vma->vm_mm = mm;
68220 vma->vm_start = addr;
68221 vma->vm_end = addr + len;
68222 @@ -1289,6 +1420,19 @@ munmap_back:
68223 error = file->f_op->mmap(file, vma);
68224 if (error)
68225 goto unmap_and_free_vma;
68226 +
68227 +#ifdef CONFIG_PAX_SEGMEXEC
68228 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68229 + added_exe_file_vma(mm);
68230 +#endif
68231 +
68232 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68233 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68234 + vma->vm_flags |= VM_PAGEEXEC;
68235 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68236 + }
68237 +#endif
68238 +
68239 if (vm_flags & VM_EXECUTABLE)
68240 added_exe_file_vma(mm);
68241
68242 @@ -1324,6 +1468,11 @@ munmap_back:
68243 vma_link(mm, vma, prev, rb_link, rb_parent);
68244 file = vma->vm_file;
68245
68246 +#ifdef CONFIG_PAX_SEGMEXEC
68247 + if (vma_m)
68248 + BUG_ON(pax_mirror_vma(vma_m, vma));
68249 +#endif
68250 +
68251 /* Once vma denies write, undo our temporary denial count */
68252 if (correct_wcount)
68253 atomic_inc(&inode->i_writecount);
68254 @@ -1332,6 +1481,7 @@ out:
68255
68256 mm->total_vm += len >> PAGE_SHIFT;
68257 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68258 + track_exec_limit(mm, addr, addr + len, vm_flags);
68259 if (vm_flags & VM_LOCKED) {
68260 if (!mlock_vma_pages_range(vma, addr, addr + len))
68261 mm->locked_vm += (len >> PAGE_SHIFT);
68262 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
68263 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68264 charged = 0;
68265 free_vma:
68266 +
68267 +#ifdef CONFIG_PAX_SEGMEXEC
68268 + if (vma_m)
68269 + kmem_cache_free(vm_area_cachep, vma_m);
68270 +#endif
68271 +
68272 kmem_cache_free(vm_area_cachep, vma);
68273 unacct_error:
68274 if (charged)
68275 @@ -1356,6 +1512,44 @@ unacct_error:
68276 return error;
68277 }
68278
68279 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68280 +{
68281 + if (!vma) {
68282 +#ifdef CONFIG_STACK_GROWSUP
68283 + if (addr > sysctl_heap_stack_gap)
68284 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68285 + else
68286 + vma = find_vma(current->mm, 0);
68287 + if (vma && (vma->vm_flags & VM_GROWSUP))
68288 + return false;
68289 +#endif
68290 + return true;
68291 + }
68292 +
68293 + if (addr + len > vma->vm_start)
68294 + return false;
68295 +
68296 + if (vma->vm_flags & VM_GROWSDOWN)
68297 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68298 +#ifdef CONFIG_STACK_GROWSUP
68299 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68300 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68301 +#endif
68302 +
68303 + return true;
68304 +}
68305 +
68306 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68307 +{
68308 + if (vma->vm_start < len)
68309 + return -ENOMEM;
68310 + if (!(vma->vm_flags & VM_GROWSDOWN))
68311 + return vma->vm_start - len;
68312 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68313 + return vma->vm_start - len - sysctl_heap_stack_gap;
68314 + return -ENOMEM;
68315 +}
68316 +
68317 /* Get an address range which is currently unmapped.
68318 * For shmat() with addr=0.
68319 *
68320 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
68321 if (flags & MAP_FIXED)
68322 return addr;
68323
68324 +#ifdef CONFIG_PAX_RANDMMAP
68325 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68326 +#endif
68327 +
68328 if (addr) {
68329 addr = PAGE_ALIGN(addr);
68330 - vma = find_vma(mm, addr);
68331 - if (TASK_SIZE - len >= addr &&
68332 - (!vma || addr + len <= vma->vm_start))
68333 - return addr;
68334 + if (TASK_SIZE - len >= addr) {
68335 + vma = find_vma(mm, addr);
68336 + if (check_heap_stack_gap(vma, addr, len))
68337 + return addr;
68338 + }
68339 }
68340 if (len > mm->cached_hole_size) {
68341 - start_addr = addr = mm->free_area_cache;
68342 + start_addr = addr = mm->free_area_cache;
68343 } else {
68344 - start_addr = addr = TASK_UNMAPPED_BASE;
68345 - mm->cached_hole_size = 0;
68346 + start_addr = addr = mm->mmap_base;
68347 + mm->cached_hole_size = 0;
68348 }
68349
68350 full_search:
68351 @@ -1404,34 +1603,40 @@ full_search:
68352 * Start a new search - just in case we missed
68353 * some holes.
68354 */
68355 - if (start_addr != TASK_UNMAPPED_BASE) {
68356 - addr = TASK_UNMAPPED_BASE;
68357 - start_addr = addr;
68358 + if (start_addr != mm->mmap_base) {
68359 + start_addr = addr = mm->mmap_base;
68360 mm->cached_hole_size = 0;
68361 goto full_search;
68362 }
68363 return -ENOMEM;
68364 }
68365 - if (!vma || addr + len <= vma->vm_start) {
68366 - /*
68367 - * Remember the place where we stopped the search:
68368 - */
68369 - mm->free_area_cache = addr + len;
68370 - return addr;
68371 - }
68372 + if (check_heap_stack_gap(vma, addr, len))
68373 + break;
68374 if (addr + mm->cached_hole_size < vma->vm_start)
68375 mm->cached_hole_size = vma->vm_start - addr;
68376 addr = vma->vm_end;
68377 }
68378 +
68379 + /*
68380 + * Remember the place where we stopped the search:
68381 + */
68382 + mm->free_area_cache = addr + len;
68383 + return addr;
68384 }
68385 #endif
68386
68387 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68388 {
68389 +
68390 +#ifdef CONFIG_PAX_SEGMEXEC
68391 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68392 + return;
68393 +#endif
68394 +
68395 /*
68396 * Is this a new hole at the lowest possible address?
68397 */
68398 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68399 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68400 mm->free_area_cache = addr;
68401 mm->cached_hole_size = ~0UL;
68402 }
68403 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
68404 {
68405 struct vm_area_struct *vma;
68406 struct mm_struct *mm = current->mm;
68407 - unsigned long addr = addr0;
68408 + unsigned long base = mm->mmap_base, addr = addr0;
68409
68410 /* requested length too big for entire address space */
68411 if (len > TASK_SIZE)
68412 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
68413 if (flags & MAP_FIXED)
68414 return addr;
68415
68416 +#ifdef CONFIG_PAX_RANDMMAP
68417 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68418 +#endif
68419 +
68420 /* requesting a specific address */
68421 if (addr) {
68422 addr = PAGE_ALIGN(addr);
68423 - vma = find_vma(mm, addr);
68424 - if (TASK_SIZE - len >= addr &&
68425 - (!vma || addr + len <= vma->vm_start))
68426 - return addr;
68427 + if (TASK_SIZE - len >= addr) {
68428 + vma = find_vma(mm, addr);
68429 + if (check_heap_stack_gap(vma, addr, len))
68430 + return addr;
68431 + }
68432 }
68433
68434 /* check if free_area_cache is useful for us */
68435 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
68436 /* make sure it can fit in the remaining address space */
68437 if (addr > len) {
68438 vma = find_vma(mm, addr-len);
68439 - if (!vma || addr <= vma->vm_start)
68440 + if (check_heap_stack_gap(vma, addr - len, len))
68441 /* remember the address as a hint for next time */
68442 return (mm->free_area_cache = addr-len);
68443 }
68444 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
68445 * return with success:
68446 */
68447 vma = find_vma(mm, addr);
68448 - if (!vma || addr+len <= vma->vm_start)
68449 + if (check_heap_stack_gap(vma, addr, len))
68450 /* remember the address as a hint for next time */
68451 return (mm->free_area_cache = addr);
68452
68453 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
68454 mm->cached_hole_size = vma->vm_start - addr;
68455
68456 /* try just below the current vma->vm_start */
68457 - addr = vma->vm_start-len;
68458 - } while (len < vma->vm_start);
68459 + addr = skip_heap_stack_gap(vma, len);
68460 + } while (!IS_ERR_VALUE(addr));
68461
68462 bottomup:
68463 /*
68464 @@ -1515,13 +1725,21 @@ bottomup:
68465 * can happen with large stack limits and large mmap()
68466 * allocations.
68467 */
68468 + mm->mmap_base = TASK_UNMAPPED_BASE;
68469 +
68470 +#ifdef CONFIG_PAX_RANDMMAP
68471 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68472 + mm->mmap_base += mm->delta_mmap;
68473 +#endif
68474 +
68475 + mm->free_area_cache = mm->mmap_base;
68476 mm->cached_hole_size = ~0UL;
68477 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68478 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68479 /*
68480 * Restore the topdown base:
68481 */
68482 - mm->free_area_cache = mm->mmap_base;
68483 + mm->mmap_base = base;
68484 + mm->free_area_cache = base;
68485 mm->cached_hole_size = ~0UL;
68486
68487 return addr;
68488 @@ -1530,6 +1748,12 @@ bottomup:
68489
68490 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68491 {
68492 +
68493 +#ifdef CONFIG_PAX_SEGMEXEC
68494 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68495 + return;
68496 +#endif
68497 +
68498 /*
68499 * Is this a new hole at the highest possible address?
68500 */
68501 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
68502 mm->free_area_cache = addr;
68503
68504 /* don't allow allocations above current base */
68505 - if (mm->free_area_cache > mm->mmap_base)
68506 + if (mm->free_area_cache > mm->mmap_base) {
68507 mm->free_area_cache = mm->mmap_base;
68508 + mm->cached_hole_size = ~0UL;
68509 + }
68510 }
68511
68512 unsigned long
68513 @@ -1646,6 +1872,28 @@ out:
68514 return prev ? prev->vm_next : vma;
68515 }
68516
68517 +#ifdef CONFIG_PAX_SEGMEXEC
68518 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68519 +{
68520 + struct vm_area_struct *vma_m;
68521 +
68522 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68523 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68524 + BUG_ON(vma->vm_mirror);
68525 + return NULL;
68526 + }
68527 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68528 + vma_m = vma->vm_mirror;
68529 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68530 + BUG_ON(vma->vm_file != vma_m->vm_file);
68531 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68532 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68533 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68534 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68535 + return vma_m;
68536 +}
68537 +#endif
68538 +
68539 /*
68540 * Verify that the stack growth is acceptable and
68541 * update accounting. This is shared with both the
68542 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
68543 return -ENOMEM;
68544
68545 /* Stack limit test */
68546 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68547 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68548 return -ENOMEM;
68549
68550 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
68551 locked = mm->locked_vm + grow;
68552 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68553 limit >>= PAGE_SHIFT;
68554 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68555 if (locked > limit && !capable(CAP_IPC_LOCK))
68556 return -ENOMEM;
68557 }
68558 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
68559 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68560 * vma is the last one with address > vma->vm_end. Have to extend vma.
68561 */
68562 +#ifndef CONFIG_IA64
68563 +static
68564 +#endif
68565 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68566 {
68567 int error;
68568 + bool locknext;
68569
68570 if (!(vma->vm_flags & VM_GROWSUP))
68571 return -EFAULT;
68572
68573 + /* Also guard against wrapping around to address 0. */
68574 + if (address < PAGE_ALIGN(address+1))
68575 + address = PAGE_ALIGN(address+1);
68576 + else
68577 + return -ENOMEM;
68578 +
68579 /*
68580 * We must make sure the anon_vma is allocated
68581 * so that the anon_vma locking is not a noop.
68582 */
68583 if (unlikely(anon_vma_prepare(vma)))
68584 return -ENOMEM;
68585 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68586 + if (locknext && anon_vma_prepare(vma->vm_next))
68587 + return -ENOMEM;
68588 vma_lock_anon_vma(vma);
68589 + if (locknext)
68590 + vma_lock_anon_vma(vma->vm_next);
68591
68592 /*
68593 * vma->vm_start/vm_end cannot change under us because the caller
68594 * is required to hold the mmap_sem in read mode. We need the
68595 - * anon_vma lock to serialize against concurrent expand_stacks.
68596 - * Also guard against wrapping around to address 0.
68597 + * anon_vma locks to serialize against concurrent expand_stacks
68598 + * and expand_upwards.
68599 */
68600 - if (address < PAGE_ALIGN(address+4))
68601 - address = PAGE_ALIGN(address+4);
68602 - else {
68603 - vma_unlock_anon_vma(vma);
68604 - return -ENOMEM;
68605 - }
68606 error = 0;
68607
68608 /* Somebody else might have raced and expanded it already */
68609 - if (address > vma->vm_end) {
68610 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68611 + error = -ENOMEM;
68612 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68613 unsigned long size, grow;
68614
68615 size = address - vma->vm_start;
68616 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
68617 }
68618 }
68619 }
68620 + if (locknext)
68621 + vma_unlock_anon_vma(vma->vm_next);
68622 vma_unlock_anon_vma(vma);
68623 khugepaged_enter_vma_merge(vma);
68624 return error;
68625 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
68626 unsigned long address)
68627 {
68628 int error;
68629 + bool lockprev = false;
68630 + struct vm_area_struct *prev;
68631
68632 /*
68633 * We must make sure the anon_vma is allocated
68634 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
68635 if (error)
68636 return error;
68637
68638 + prev = vma->vm_prev;
68639 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68640 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68641 +#endif
68642 + if (lockprev && anon_vma_prepare(prev))
68643 + return -ENOMEM;
68644 + if (lockprev)
68645 + vma_lock_anon_vma(prev);
68646 +
68647 vma_lock_anon_vma(vma);
68648
68649 /*
68650 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
68651 */
68652
68653 /* Somebody else might have raced and expanded it already */
68654 - if (address < vma->vm_start) {
68655 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68656 + error = -ENOMEM;
68657 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68658 unsigned long size, grow;
68659
68660 +#ifdef CONFIG_PAX_SEGMEXEC
68661 + struct vm_area_struct *vma_m;
68662 +
68663 + vma_m = pax_find_mirror_vma(vma);
68664 +#endif
68665 +
68666 size = vma->vm_end - address;
68667 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68668
68669 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
68670 if (!error) {
68671 vma->vm_start = address;
68672 vma->vm_pgoff -= grow;
68673 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68674 +
68675 +#ifdef CONFIG_PAX_SEGMEXEC
68676 + if (vma_m) {
68677 + vma_m->vm_start -= grow << PAGE_SHIFT;
68678 + vma_m->vm_pgoff -= grow;
68679 + }
68680 +#endif
68681 +
68682 perf_event_mmap(vma);
68683 }
68684 }
68685 }
68686 vma_unlock_anon_vma(vma);
68687 + if (lockprev)
68688 + vma_unlock_anon_vma(prev);
68689 khugepaged_enter_vma_merge(vma);
68690 return error;
68691 }
68692 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
68693 do {
68694 long nrpages = vma_pages(vma);
68695
68696 +#ifdef CONFIG_PAX_SEGMEXEC
68697 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68698 + vma = remove_vma(vma);
68699 + continue;
68700 + }
68701 +#endif
68702 +
68703 mm->total_vm -= nrpages;
68704 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68705 vma = remove_vma(vma);
68706 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68707 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68708 vma->vm_prev = NULL;
68709 do {
68710 +
68711 +#ifdef CONFIG_PAX_SEGMEXEC
68712 + if (vma->vm_mirror) {
68713 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68714 + vma->vm_mirror->vm_mirror = NULL;
68715 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
68716 + vma->vm_mirror = NULL;
68717 + }
68718 +#endif
68719 +
68720 rb_erase(&vma->vm_rb, &mm->mm_rb);
68721 mm->map_count--;
68722 tail_vma = vma;
68723 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
68724 struct vm_area_struct *new;
68725 int err = -ENOMEM;
68726
68727 +#ifdef CONFIG_PAX_SEGMEXEC
68728 + struct vm_area_struct *vma_m, *new_m = NULL;
68729 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68730 +#endif
68731 +
68732 if (is_vm_hugetlb_page(vma) && (addr &
68733 ~(huge_page_mask(hstate_vma(vma)))))
68734 return -EINVAL;
68735
68736 +#ifdef CONFIG_PAX_SEGMEXEC
68737 + vma_m = pax_find_mirror_vma(vma);
68738 +#endif
68739 +
68740 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68741 if (!new)
68742 goto out_err;
68743
68744 +#ifdef CONFIG_PAX_SEGMEXEC
68745 + if (vma_m) {
68746 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68747 + if (!new_m) {
68748 + kmem_cache_free(vm_area_cachep, new);
68749 + goto out_err;
68750 + }
68751 + }
68752 +#endif
68753 +
68754 /* most fields are the same, copy all, and then fixup */
68755 *new = *vma;
68756
68757 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
68758 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68759 }
68760
68761 +#ifdef CONFIG_PAX_SEGMEXEC
68762 + if (vma_m) {
68763 + *new_m = *vma_m;
68764 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
68765 + new_m->vm_mirror = new;
68766 + new->vm_mirror = new_m;
68767 +
68768 + if (new_below)
68769 + new_m->vm_end = addr_m;
68770 + else {
68771 + new_m->vm_start = addr_m;
68772 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68773 + }
68774 + }
68775 +#endif
68776 +
68777 pol = mpol_dup(vma_policy(vma));
68778 if (IS_ERR(pol)) {
68779 err = PTR_ERR(pol);
68780 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
68781 else
68782 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68783
68784 +#ifdef CONFIG_PAX_SEGMEXEC
68785 + if (!err && vma_m) {
68786 + if (anon_vma_clone(new_m, vma_m))
68787 + goto out_free_mpol;
68788 +
68789 + mpol_get(pol);
68790 + vma_set_policy(new_m, pol);
68791 +
68792 + if (new_m->vm_file) {
68793 + get_file(new_m->vm_file);
68794 + if (vma_m->vm_flags & VM_EXECUTABLE)
68795 + added_exe_file_vma(mm);
68796 + }
68797 +
68798 + if (new_m->vm_ops && new_m->vm_ops->open)
68799 + new_m->vm_ops->open(new_m);
68800 +
68801 + if (new_below)
68802 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68803 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68804 + else
68805 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68806 +
68807 + if (err) {
68808 + if (new_m->vm_ops && new_m->vm_ops->close)
68809 + new_m->vm_ops->close(new_m);
68810 + if (new_m->vm_file) {
68811 + if (vma_m->vm_flags & VM_EXECUTABLE)
68812 + removed_exe_file_vma(mm);
68813 + fput(new_m->vm_file);
68814 + }
68815 + mpol_put(pol);
68816 + }
68817 + }
68818 +#endif
68819 +
68820 /* Success. */
68821 if (!err)
68822 return 0;
68823 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
68824 removed_exe_file_vma(mm);
68825 fput(new->vm_file);
68826 }
68827 - unlink_anon_vmas(new);
68828 out_free_mpol:
68829 mpol_put(pol);
68830 out_free_vma:
68831 +
68832 +#ifdef CONFIG_PAX_SEGMEXEC
68833 + if (new_m) {
68834 + unlink_anon_vmas(new_m);
68835 + kmem_cache_free(vm_area_cachep, new_m);
68836 + }
68837 +#endif
68838 +
68839 + unlink_anon_vmas(new);
68840 kmem_cache_free(vm_area_cachep, new);
68841 out_err:
68842 return err;
68843 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
68844 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
68845 unsigned long addr, int new_below)
68846 {
68847 +
68848 +#ifdef CONFIG_PAX_SEGMEXEC
68849 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68850 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68851 + if (mm->map_count >= sysctl_max_map_count-1)
68852 + return -ENOMEM;
68853 + } else
68854 +#endif
68855 +
68856 if (mm->map_count >= sysctl_max_map_count)
68857 return -ENOMEM;
68858
68859 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
68860 * work. This now handles partial unmappings.
68861 * Jeremy Fitzhardinge <jeremy@goop.org>
68862 */
68863 +#ifdef CONFIG_PAX_SEGMEXEC
68864 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68865 {
68866 + int ret = __do_munmap(mm, start, len);
68867 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68868 + return ret;
68869 +
68870 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68871 +}
68872 +
68873 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68874 +#else
68875 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68876 +#endif
68877 +{
68878 unsigned long end;
68879 struct vm_area_struct *vma, *prev, *last;
68880
68881 + /*
68882 + * mm->mmap_sem is required to protect against another thread
68883 + * changing the mappings in case we sleep.
68884 + */
68885 + verify_mm_writelocked(mm);
68886 +
68887 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68888 return -EINVAL;
68889
68890 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
68891 /* Fix up all other VM information */
68892 remove_vma_list(mm, vma);
68893
68894 + track_exec_limit(mm, start, end, 0UL);
68895 +
68896 return 0;
68897 }
68898
68899 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68900
68901 profile_munmap(addr);
68902
68903 +#ifdef CONFIG_PAX_SEGMEXEC
68904 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68905 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68906 + return -EINVAL;
68907 +#endif
68908 +
68909 down_write(&mm->mmap_sem);
68910 ret = do_munmap(mm, addr, len);
68911 up_write(&mm->mmap_sem);
68912 return ret;
68913 }
68914
68915 -static inline void verify_mm_writelocked(struct mm_struct *mm)
68916 -{
68917 -#ifdef CONFIG_DEBUG_VM
68918 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68919 - WARN_ON(1);
68920 - up_read(&mm->mmap_sem);
68921 - }
68922 -#endif
68923 -}
68924 -
68925 /*
68926 * this is really a simplified "do_mmap". it only handles
68927 * anonymous maps. eventually we may be able to do some
68928 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
68929 struct rb_node ** rb_link, * rb_parent;
68930 pgoff_t pgoff = addr >> PAGE_SHIFT;
68931 int error;
68932 + unsigned long charged;
68933
68934 len = PAGE_ALIGN(len);
68935 if (!len)
68936 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
68937
68938 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
68939
68940 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68941 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68942 + flags &= ~VM_EXEC;
68943 +
68944 +#ifdef CONFIG_PAX_MPROTECT
68945 + if (mm->pax_flags & MF_PAX_MPROTECT)
68946 + flags &= ~VM_MAYEXEC;
68947 +#endif
68948 +
68949 + }
68950 +#endif
68951 +
68952 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
68953 if (error & ~PAGE_MASK)
68954 return error;
68955
68956 + charged = len >> PAGE_SHIFT;
68957 +
68958 /*
68959 * mlock MCL_FUTURE?
68960 */
68961 if (mm->def_flags & VM_LOCKED) {
68962 unsigned long locked, lock_limit;
68963 - locked = len >> PAGE_SHIFT;
68964 + locked = charged;
68965 locked += mm->locked_vm;
68966 lock_limit = rlimit(RLIMIT_MEMLOCK);
68967 lock_limit >>= PAGE_SHIFT;
68968 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
68969 /*
68970 * Clear old maps. this also does some error checking for us
68971 */
68972 - munmap_back:
68973 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68974 if (vma && vma->vm_start < addr + len) {
68975 if (do_munmap(mm, addr, len))
68976 return -ENOMEM;
68977 - goto munmap_back;
68978 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68979 + BUG_ON(vma && vma->vm_start < addr + len);
68980 }
68981
68982 /* Check against address space limits *after* clearing old maps... */
68983 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
68984 + if (!may_expand_vm(mm, charged))
68985 return -ENOMEM;
68986
68987 if (mm->map_count > sysctl_max_map_count)
68988 return -ENOMEM;
68989
68990 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
68991 + if (security_vm_enough_memory(charged))
68992 return -ENOMEM;
68993
68994 /* Can we just expand an old private anonymous mapping? */
68995 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
68996 */
68997 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68998 if (!vma) {
68999 - vm_unacct_memory(len >> PAGE_SHIFT);
69000 + vm_unacct_memory(charged);
69001 return -ENOMEM;
69002 }
69003
69004 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
69005 vma_link(mm, vma, prev, rb_link, rb_parent);
69006 out:
69007 perf_event_mmap(vma);
69008 - mm->total_vm += len >> PAGE_SHIFT;
69009 + mm->total_vm += charged;
69010 if (flags & VM_LOCKED) {
69011 if (!mlock_vma_pages_range(vma, addr, addr + len))
69012 - mm->locked_vm += (len >> PAGE_SHIFT);
69013 + mm->locked_vm += charged;
69014 }
69015 + track_exec_limit(mm, addr, addr + len, flags);
69016 return addr;
69017 }
69018
69019 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
69020 * Walk the list again, actually closing and freeing it,
69021 * with preemption enabled, without holding any MM locks.
69022 */
69023 - while (vma)
69024 + while (vma) {
69025 + vma->vm_mirror = NULL;
69026 vma = remove_vma(vma);
69027 + }
69028
69029 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69030 }
69031 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
69032 struct vm_area_struct * __vma, * prev;
69033 struct rb_node ** rb_link, * rb_parent;
69034
69035 +#ifdef CONFIG_PAX_SEGMEXEC
69036 + struct vm_area_struct *vma_m = NULL;
69037 +#endif
69038 +
69039 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69040 + return -EPERM;
69041 +
69042 /*
69043 * The vm_pgoff of a purely anonymous vma should be irrelevant
69044 * until its first write fault, when page's anon_vma and index
69045 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
69046 if ((vma->vm_flags & VM_ACCOUNT) &&
69047 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69048 return -ENOMEM;
69049 +
69050 +#ifdef CONFIG_PAX_SEGMEXEC
69051 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69052 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69053 + if (!vma_m)
69054 + return -ENOMEM;
69055 + }
69056 +#endif
69057 +
69058 vma_link(mm, vma, prev, rb_link, rb_parent);
69059 +
69060 +#ifdef CONFIG_PAX_SEGMEXEC
69061 + if (vma_m)
69062 + BUG_ON(pax_mirror_vma(vma_m, vma));
69063 +#endif
69064 +
69065 return 0;
69066 }
69067
69068 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
69069 struct rb_node **rb_link, *rb_parent;
69070 struct mempolicy *pol;
69071
69072 + BUG_ON(vma->vm_mirror);
69073 +
69074 /*
69075 * If anonymous vma has not yet been faulted, update new pgoff
69076 * to match new location, to increase its chance of merging.
69077 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
69078 return NULL;
69079 }
69080
69081 +#ifdef CONFIG_PAX_SEGMEXEC
69082 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69083 +{
69084 + struct vm_area_struct *prev_m;
69085 + struct rb_node **rb_link_m, *rb_parent_m;
69086 + struct mempolicy *pol_m;
69087 +
69088 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69089 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69090 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69091 + *vma_m = *vma;
69092 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69093 + if (anon_vma_clone(vma_m, vma))
69094 + return -ENOMEM;
69095 + pol_m = vma_policy(vma_m);
69096 + mpol_get(pol_m);
69097 + vma_set_policy(vma_m, pol_m);
69098 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69099 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69100 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69101 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69102 + if (vma_m->vm_file)
69103 + get_file(vma_m->vm_file);
69104 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69105 + vma_m->vm_ops->open(vma_m);
69106 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69107 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69108 + vma_m->vm_mirror = vma;
69109 + vma->vm_mirror = vma_m;
69110 + return 0;
69111 +}
69112 +#endif
69113 +
69114 /*
69115 * Return true if the calling process may expand its vm space by the passed
69116 * number of pages
69117 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
69118 unsigned long lim;
69119
69120 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69121 -
69122 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69123 if (cur + npages > lim)
69124 return 0;
69125 return 1;
69126 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
69127 vma->vm_start = addr;
69128 vma->vm_end = addr + len;
69129
69130 +#ifdef CONFIG_PAX_MPROTECT
69131 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69132 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69133 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69134 + return -EPERM;
69135 + if (!(vm_flags & VM_EXEC))
69136 + vm_flags &= ~VM_MAYEXEC;
69137 +#else
69138 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69139 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69140 +#endif
69141 + else
69142 + vm_flags &= ~VM_MAYWRITE;
69143 + }
69144 +#endif
69145 +
69146 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69147 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69148
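
To make the PAX_MPROTECT logic in the mm/mmap.c hunks above concrete: writable+executable requests are refused (or have VM_EXEC stripped under CONFIG_PAX_EMUPLT), and a mapping created without PROT_EXEC loses VM_MAYEXEC so it can never be made executable later. A minimal user-space sketch of the observable effect, assuming a kernel built with CONFIG_PAX_MPROTECT (without CONFIG_PAX_MPROTECT_COMPAT) and a task flagged MF_PAX_MPROTECT; on a stock kernel both operations simply succeed:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* Asking for a writable+executable anonymous mapping: under
	 * PAX_MPROTECT do_mmap_pgoff() returns -EPERM (or silently drops
	 * VM_EXEC when CONFIG_PAX_EMUPLT is enabled). */
	void *wx = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (wx == MAP_FAILED)
		printf("RWX mmap refused: %s\n", strerror(errno));
	else
		printf("RWX mmap allowed at %p\n", wx);

	/* A mapping created without PROT_EXEC has VM_MAYEXEC cleared, so
	 * the VM_MAY* check in sys_mprotect() rejects a later upgrade. */
	void *rw = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rw == MAP_FAILED)
		return 1;
	if (mprotect(rw, len, PROT_READ | PROT_EXEC) != 0)
		printf("mprotect to r-x refused: %s\n", strerror(errno));
	else
		printf("mprotect to r-x allowed\n");
	return 0;
}

Compiling this with gcc and running it once on a hardened and once on a stock kernel shows the difference directly.
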
69149 diff -urNp linux-3.0.7/mm/mprotect.c linux-3.0.7/mm/mprotect.c
69150 --- linux-3.0.7/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
69151 +++ linux-3.0.7/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
69152 @@ -23,10 +23,16 @@
69153 #include <linux/mmu_notifier.h>
69154 #include <linux/migrate.h>
69155 #include <linux/perf_event.h>
69156 +
69157 +#ifdef CONFIG_PAX_MPROTECT
69158 +#include <linux/elf.h>
69159 +#endif
69160 +
69161 #include <asm/uaccess.h>
69162 #include <asm/pgtable.h>
69163 #include <asm/cacheflush.h>
69164 #include <asm/tlbflush.h>
69165 +#include <asm/mmu_context.h>
69166
69167 #ifndef pgprot_modify
69168 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69169 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
69170 flush_tlb_range(vma, start, end);
69171 }
69172
69173 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69174 +/* called while holding the mmap semaphore for writing, except for stack expansion */
69175 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69176 +{
69177 + unsigned long oldlimit, newlimit = 0UL;
69178 +
69179 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69180 + return;
69181 +
69182 + spin_lock(&mm->page_table_lock);
69183 + oldlimit = mm->context.user_cs_limit;
69184 + if ((prot & VM_EXEC) && oldlimit < end)
69185 + /* USER_CS limit moved up */
69186 + newlimit = end;
69187 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69188 + /* USER_CS limit moved down */
69189 + newlimit = start;
69190 +
69191 + if (newlimit) {
69192 + mm->context.user_cs_limit = newlimit;
69193 +
69194 +#ifdef CONFIG_SMP
69195 + wmb();
69196 + cpus_clear(mm->context.cpu_user_cs_mask);
69197 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69198 +#endif
69199 +
69200 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69201 + }
69202 + spin_unlock(&mm->page_table_lock);
69203 + if (newlimit == end) {
69204 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69205 +
69206 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69207 + if (is_vm_hugetlb_page(vma))
69208 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69209 + else
69210 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69211 + }
69212 +}
69213 +#endif
69214 +
69215 int
69216 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69217 unsigned long start, unsigned long end, unsigned long newflags)
69218 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69219 int error;
69220 int dirty_accountable = 0;
69221
69222 +#ifdef CONFIG_PAX_SEGMEXEC
69223 + struct vm_area_struct *vma_m = NULL;
69224 + unsigned long start_m, end_m;
69225 +
69226 + start_m = start + SEGMEXEC_TASK_SIZE;
69227 + end_m = end + SEGMEXEC_TASK_SIZE;
69228 +#endif
69229 +
69230 if (newflags == oldflags) {
69231 *pprev = vma;
69232 return 0;
69233 }
69234
69235 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69236 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69237 +
69238 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69239 + return -ENOMEM;
69240 +
69241 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69242 + return -ENOMEM;
69243 + }
69244 +
69245 /*
69246 * If we make a private mapping writable we increase our commit;
69247 * but (without finer accounting) cannot reduce our commit if we
69248 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69249 }
69250 }
69251
69252 +#ifdef CONFIG_PAX_SEGMEXEC
69253 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69254 + if (start != vma->vm_start) {
69255 + error = split_vma(mm, vma, start, 1);
69256 + if (error)
69257 + goto fail;
69258 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69259 + *pprev = (*pprev)->vm_next;
69260 + }
69261 +
69262 + if (end != vma->vm_end) {
69263 + error = split_vma(mm, vma, end, 0);
69264 + if (error)
69265 + goto fail;
69266 + }
69267 +
69268 + if (pax_find_mirror_vma(vma)) {
69269 + error = __do_munmap(mm, start_m, end_m - start_m);
69270 + if (error)
69271 + goto fail;
69272 + } else {
69273 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69274 + if (!vma_m) {
69275 + error = -ENOMEM;
69276 + goto fail;
69277 + }
69278 + vma->vm_flags = newflags;
69279 + error = pax_mirror_vma(vma_m, vma);
69280 + if (error) {
69281 + vma->vm_flags = oldflags;
69282 + goto fail;
69283 + }
69284 + }
69285 + }
69286 +#endif
69287 +
69288 /*
69289 * First try to merge with previous and/or next vma.
69290 */
69291 @@ -204,9 +306,21 @@ success:
69292 * vm_flags and vm_page_prot are protected by the mmap_sem
69293 * held in write mode.
69294 */
69295 +
69296 +#ifdef CONFIG_PAX_SEGMEXEC
69297 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69298 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69299 +#endif
69300 +
69301 vma->vm_flags = newflags;
69302 +
69303 +#ifdef CONFIG_PAX_MPROTECT
69304 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69305 + mm->binfmt->handle_mprotect(vma, newflags);
69306 +#endif
69307 +
69308 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69309 - vm_get_page_prot(newflags));
69310 + vm_get_page_prot(vma->vm_flags));
69311
69312 if (vma_wants_writenotify(vma)) {
69313 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69314 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69315 end = start + len;
69316 if (end <= start)
69317 return -ENOMEM;
69318 +
69319 +#ifdef CONFIG_PAX_SEGMEXEC
69320 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69321 + if (end > SEGMEXEC_TASK_SIZE)
69322 + return -EINVAL;
69323 + } else
69324 +#endif
69325 +
69326 + if (end > TASK_SIZE)
69327 + return -EINVAL;
69328 +
69329 if (!arch_validate_prot(prot))
69330 return -EINVAL;
69331
69332 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69333 /*
69334 * Does the application expect PROT_READ to imply PROT_EXEC:
69335 */
69336 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69337 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69338 prot |= PROT_EXEC;
69339
69340 vm_flags = calc_vm_prot_bits(prot);
69341 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69342 if (start > vma->vm_start)
69343 prev = vma;
69344
69345 +#ifdef CONFIG_PAX_MPROTECT
69346 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69347 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69348 +#endif
69349 +
69350 for (nstart = start ; ; ) {
69351 unsigned long newflags;
69352
69353 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69354
69355 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69356 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69357 + if (prot & (PROT_WRITE | PROT_EXEC))
69358 + gr_log_rwxmprotect(vma->vm_file);
69359 +
69360 + error = -EACCES;
69361 + goto out;
69362 + }
69363 +
69364 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69365 error = -EACCES;
69366 goto out;
69367 }
69368 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69369 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69370 if (error)
69371 goto out;
69372 +
69373 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69374 +
69375 nstart = tmp;
69376
69377 if (nstart < prev->vm_end)
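
check_heap_stack_gap()/skip_heap_stack_gap() in the mm/mmap.c hunks, and the VM_GROWSDOWN/VM_GROWSUP tests added to mprotect_fixup() above, all enforce one invariant: a new or re-permissioned mapping may not come closer than sysctl_heap_stack_gap bytes to an adjacent stack VMA. A stripped-down user-space model of the downward-growing case, using plain integers in place of vm_area_structs (the names are illustrative, not kernel API):

#include <assert.h>
#include <stdbool.h>

/* Model: the next VMA above the candidate range [addr, addr+len)
 * starts at vma_start and grows down iff growsdown is set. */
static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long vma_start, bool growsdown,
		   unsigned long gap)
{
	if (addr + len > vma_start)	/* plain overlap */
		return false;
	if (growsdown)			/* keep the guard gap intact */
		return gap <= vma_start - addr - len;
	return true;
}

int main(void)
{
	unsigned long gap = 64 * 1024;		/* example guard size */
	unsigned long stack = 0x40000000UL;	/* stack VMA start */

	/* ends a full gap below the stack: allowed */
	assert(gap_ok(stack - 2 * gap, gap, stack, true, gap));
	/* ends inside the guard gap below the stack: refused */
	assert(!gap_ok(stack - gap / 2, gap / 4, stack, true, gap));
	/* the same placement next to a non-stack VMA: allowed */
	assert(gap_ok(stack - gap / 2, gap / 4, stack, false, gap));
	return 0;
}
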
69378 diff -urNp linux-3.0.7/mm/mremap.c linux-3.0.7/mm/mremap.c
69379 --- linux-3.0.7/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
69380 +++ linux-3.0.7/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
69381 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69382 continue;
69383 pte = ptep_clear_flush(vma, old_addr, old_pte);
69384 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69385 +
69386 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69387 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69388 + pte = pte_exprotect(pte);
69389 +#endif
69390 +
69391 set_pte_at(mm, new_addr, new_pte, pte);
69392 }
69393
69394 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69395 if (is_vm_hugetlb_page(vma))
69396 goto Einval;
69397
69398 +#ifdef CONFIG_PAX_SEGMEXEC
69399 + if (pax_find_mirror_vma(vma))
69400 + goto Einval;
69401 +#endif
69402 +
69403 /* We can't remap across vm area boundaries */
69404 if (old_len > vma->vm_end - addr)
69405 goto Efault;
69406 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69407 unsigned long ret = -EINVAL;
69408 unsigned long charged = 0;
69409 unsigned long map_flags;
69410 + unsigned long pax_task_size = TASK_SIZE;
69411
69412 if (new_addr & ~PAGE_MASK)
69413 goto out;
69414
69415 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69416 +#ifdef CONFIG_PAX_SEGMEXEC
69417 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69418 + pax_task_size = SEGMEXEC_TASK_SIZE;
69419 +#endif
69420 +
69421 + pax_task_size -= PAGE_SIZE;
69422 +
69423 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69424 goto out;
69425
69426 /* Check if the location we're moving into overlaps the
69427 * old location at all, and fail if it does.
69428 */
69429 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69430 - goto out;
69431 -
69432 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69433 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69434 goto out;
69435
69436 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69437 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69438 struct vm_area_struct *vma;
69439 unsigned long ret = -EINVAL;
69440 unsigned long charged = 0;
69441 + unsigned long pax_task_size = TASK_SIZE;
69442
69443 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69444 goto out;
69445 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69446 if (!new_len)
69447 goto out;
69448
69449 +#ifdef CONFIG_PAX_SEGMEXEC
69450 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69451 + pax_task_size = SEGMEXEC_TASK_SIZE;
69452 +#endif
69453 +
69454 + pax_task_size -= PAGE_SIZE;
69455 +
69456 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69457 + old_len > pax_task_size || addr > pax_task_size-old_len)
69458 + goto out;
69459 +
69460 if (flags & MREMAP_FIXED) {
69461 if (flags & MREMAP_MAYMOVE)
69462 ret = mremap_to(addr, old_len, new_addr, new_len);
69463 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69464 addr + new_len);
69465 }
69466 ret = addr;
69467 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69468 goto out;
69469 }
69470 }
69471 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69472 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69473 if (ret)
69474 goto out;
69475 +
69476 + map_flags = vma->vm_flags;
69477 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69478 + if (!(ret & ~PAGE_MASK)) {
69479 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69480 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69481 + }
69482 }
69483 out:
69484 if (ret & ~PAGE_MASK)
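
The mremap_to() change above collapses the two original overlap tests into one. For the half-open ranges [addr, addr+old_len) and [new_addr, new_addr+new_len), both forms express the standard interval-overlap condition, so the behaviour is unchanged for non-zero lengths. A small sketch that brute-forces the equivalence over a grid of hypothetical page-aligned ranges:

#include <assert.h>
#include <stdbool.h>

static bool overlap_old(unsigned long addr, unsigned long old_len,
			unsigned long new_addr, unsigned long new_len)
{
	/* the two tests removed by the patch */
	if (new_addr <= addr && new_addr + new_len > addr)
		return true;
	if (addr <= new_addr && addr + old_len > new_addr)
		return true;
	return false;
}

static bool overlap_new(unsigned long addr, unsigned long old_len,
			unsigned long new_addr, unsigned long new_len)
{
	/* the single test added by the patch */
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	unsigned long a, b;

	for (a = 0; a < 16; a++)
		for (b = 0; b < 16; b++)
			assert(overlap_old(a << 12, 4 << 12, b << 12, 3 << 12) ==
			       overlap_new(a << 12, 4 << 12, b << 12, 3 << 12));
	return 0;
}
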
69485 diff -urNp linux-3.0.7/mm/nobootmem.c linux-3.0.7/mm/nobootmem.c
69486 --- linux-3.0.7/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
69487 +++ linux-3.0.7/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
69488 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69489 unsigned long __init free_all_memory_core_early(int nodeid)
69490 {
69491 int i;
69492 - u64 start, end;
69493 + u64 start, end, startrange, endrange;
69494 unsigned long count = 0;
69495 - struct range *range = NULL;
69496 + struct range *range = NULL, rangerange = { 0, 0 };
69497 int nr_range;
69498
69499 nr_range = get_free_all_memory_range(&range, nodeid);
69500 + startrange = __pa(range) >> PAGE_SHIFT;
69501 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69502
69503 for (i = 0; i < nr_range; i++) {
69504 start = range[i].start;
69505 end = range[i].end;
69506 + if (start <= endrange && startrange < end) {
69507 + BUG_ON(rangerange.start | rangerange.end);
69508 + rangerange = range[i];
69509 + continue;
69510 + }
69511 count += end - start;
69512 __free_pages_memory(start, end);
69513 }
69514 + start = rangerange.start;
69515 + end = rangerange.end;
69516 + count += end - start;
69517 + __free_pages_memory(start, end);
69518
69519 return count;
69520 }
69521 diff -urNp linux-3.0.7/mm/nommu.c linux-3.0.7/mm/nommu.c
69522 --- linux-3.0.7/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
69523 +++ linux-3.0.7/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
69524 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69525 int sysctl_overcommit_ratio = 50; /* default is 50% */
69526 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69527 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69528 -int heap_stack_gap = 0;
69529
69530 atomic_long_t mmap_pages_allocated;
69531
69532 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
69533 EXPORT_SYMBOL(find_vma);
69534
69535 /*
69536 - * find a VMA
69537 - * - we don't extend stack VMAs under NOMMU conditions
69538 - */
69539 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69540 -{
69541 - return find_vma(mm, addr);
69542 -}
69543 -
69544 -/*
69545 * expand a stack to a given address
69546 * - not supported under NOMMU conditions
69547 */
69548 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
69549
69550 /* most fields are the same, copy all, and then fixup */
69551 *new = *vma;
69552 + INIT_LIST_HEAD(&new->anon_vma_chain);
69553 *region = *vma->vm_region;
69554 new->vm_region = region;
69555
69556 diff -urNp linux-3.0.7/mm/page_alloc.c linux-3.0.7/mm/page_alloc.c
69557 --- linux-3.0.7/mm/page_alloc.c 2011-10-16 21:54:54.000000000 -0400
69558 +++ linux-3.0.7/mm/page_alloc.c 2011-10-16 21:55:28.000000000 -0400
69559 @@ -340,7 +340,7 @@ out:
69560 * This usage means that zero-order pages may not be compound.
69561 */
69562
69563 -static void free_compound_page(struct page *page)
69564 +void free_compound_page(struct page *page)
69565 {
69566 __free_pages_ok(page, compound_order(page));
69567 }
69568 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69569 int i;
69570 int bad = 0;
69571
69572 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69573 + unsigned long index = 1UL << order;
69574 +#endif
69575 +
69576 trace_mm_page_free_direct(page, order);
69577 kmemcheck_free_shadow(page, order);
69578
69579 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69580 debug_check_no_obj_freed(page_address(page),
69581 PAGE_SIZE << order);
69582 }
69583 +
69584 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69585 + for (; index; --index)
69586 + sanitize_highpage(page + index - 1);
69587 +#endif
69588 +
69589 arch_free_page(page, order);
69590 kernel_map_pages(page, 1 << order, 0);
69591
69592 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69593 arch_alloc_page(page, order);
69594 kernel_map_pages(page, 1 << order, 1);
69595
69596 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69597 if (gfp_flags & __GFP_ZERO)
69598 prep_zero_page(page, order, gfp_flags);
69599 +#endif
69600
69601 if (order && (gfp_flags & __GFP_COMP))
69602 prep_compound_page(page, order);
69603 @@ -2557,6 +2569,8 @@ void show_free_areas(unsigned int filter
69604 int cpu;
69605 struct zone *zone;
69606
69607 + pax_track_stack();
69608 +
69609 for_each_populated_zone(zone) {
69610 if (skip_free_areas_node(filter, zone_to_nid(zone)))
69611 continue;
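
The CONFIG_PAX_MEMORY_SANITIZE hunks above scrub every page in free_pages_prepare() and, in return, let prep_new_page() skip the explicit __GFP_ZERO zeroing: a page coming back off the free lists is already clear. A toy pool allocator sketch of that trade-off (the pool and all names are hypothetical, not kernel API):

#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES    4

static unsigned char pool[NPAGES][PAGE_SIZE];
static int free_stack[NPAGES] = { 0, 1, 2, 3 };
static int free_top = NPAGES;

/* Scrub on free: the analogue of the sanitize_highpage() loop added
 * to free_pages_prepare(). */
static void toy_free_page(int idx)
{
	memset(pool[idx], 0, PAGE_SIZE);
	free_stack[free_top++] = idx;
}

/* ...so the allocation path needs no extra memset for zeroed pages,
 * the analogue of skipping prep_zero_page() for __GFP_ZERO. */
static int toy_alloc_page(void)
{
	return free_top ? free_stack[--free_top] : -1;
}

int main(void)
{
	int i, p = toy_alloc_page();

	memset(pool[p], 0xaa, PAGE_SIZE);	/* dirty the page */
	toy_free_page(p);

	p = toy_alloc_page();			/* same page comes back */
	for (i = 0; i < PAGE_SIZE; i++)
		assert(pool[p][i] == 0);	/* and it is clean */
	return 0;
}
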
69612 diff -urNp linux-3.0.7/mm/percpu.c linux-3.0.7/mm/percpu.c
69613 --- linux-3.0.7/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
69614 +++ linux-3.0.7/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
69615 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
69616 static unsigned int pcpu_last_unit_cpu __read_mostly;
69617
69618 /* the address of the first chunk which starts with the kernel static area */
69619 -void *pcpu_base_addr __read_mostly;
69620 +void *pcpu_base_addr __read_only;
69621 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69622
69623 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69624 diff -urNp linux-3.0.7/mm/rmap.c linux-3.0.7/mm/rmap.c
69625 --- linux-3.0.7/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
69626 +++ linux-3.0.7/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
69627 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
69628 struct anon_vma *anon_vma = vma->anon_vma;
69629 struct anon_vma_chain *avc;
69630
69631 +#ifdef CONFIG_PAX_SEGMEXEC
69632 + struct anon_vma_chain *avc_m = NULL;
69633 +#endif
69634 +
69635 might_sleep();
69636 if (unlikely(!anon_vma)) {
69637 struct mm_struct *mm = vma->vm_mm;
69638 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
69639 if (!avc)
69640 goto out_enomem;
69641
69642 +#ifdef CONFIG_PAX_SEGMEXEC
69643 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
69644 + if (!avc_m)
69645 + goto out_enomem_free_avc;
69646 +#endif
69647 +
69648 anon_vma = find_mergeable_anon_vma(vma);
69649 allocated = NULL;
69650 if (!anon_vma) {
69651 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
69652 /* page_table_lock to protect against threads */
69653 spin_lock(&mm->page_table_lock);
69654 if (likely(!vma->anon_vma)) {
69655 +
69656 +#ifdef CONFIG_PAX_SEGMEXEC
69657 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69658 +
69659 + if (vma_m) {
69660 + BUG_ON(vma_m->anon_vma);
69661 + vma_m->anon_vma = anon_vma;
69662 + avc_m->anon_vma = anon_vma;
69663 + avc_m->vma = vma;
69664 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
69665 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
69666 + avc_m = NULL;
69667 + }
69668 +#endif
69669 +
69670 vma->anon_vma = anon_vma;
69671 avc->anon_vma = anon_vma;
69672 avc->vma = vma;
69673 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
69674
69675 if (unlikely(allocated))
69676 put_anon_vma(allocated);
69677 +
69678 +#ifdef CONFIG_PAX_SEGMEXEC
69679 + if (unlikely(avc_m))
69680 + anon_vma_chain_free(avc_m);
69681 +#endif
69682 +
69683 if (unlikely(avc))
69684 anon_vma_chain_free(avc);
69685 }
69686 return 0;
69687
69688 out_enomem_free_avc:
69689 +
69690 +#ifdef CONFIG_PAX_SEGMEXEC
69691 + if (avc_m)
69692 + anon_vma_chain_free(avc_m);
69693 +#endif
69694 +
69695 anon_vma_chain_free(avc);
69696 out_enomem:
69697 return -ENOMEM;
69698 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
69699 * Attach the anon_vmas from src to dst.
69700 * Returns 0 on success, -ENOMEM on failure.
69701 */
69702 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
69703 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
69704 {
69705 struct anon_vma_chain *avc, *pavc;
69706 struct anon_vma *root = NULL;
69707 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
69708 * the corresponding VMA in the parent process is attached to.
69709 * Returns 0 on success, non-zero on failure.
69710 */
69711 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
69712 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
69713 {
69714 struct anon_vma_chain *avc;
69715 struct anon_vma *anon_vma;
69716 diff -urNp linux-3.0.7/mm/shmem.c linux-3.0.7/mm/shmem.c
69717 --- linux-3.0.7/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
69718 +++ linux-3.0.7/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
69719 @@ -31,7 +31,7 @@
69720 #include <linux/percpu_counter.h>
69721 #include <linux/swap.h>
69722
69723 -static struct vfsmount *shm_mnt;
69724 +struct vfsmount *shm_mnt;
69725
69726 #ifdef CONFIG_SHMEM
69727 /*
69728 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
69729 goto unlock;
69730 }
69731 entry = shmem_swp_entry(info, index, NULL);
69732 + if (!entry)
69733 + goto unlock;
69734 if (entry->val) {
69735 /*
69736 * The more uptodate page coming down from a stacked
69737 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
69738 struct vm_area_struct pvma;
69739 struct page *page;
69740
69741 + pax_track_stack();
69742 +
69743 spol = mpol_cond_copy(&mpol,
69744 mpol_shared_policy_lookup(&info->policy, idx));
69745
69746 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
69747 int err = -ENOMEM;
69748
69749 /* Round up to L1_CACHE_BYTES to resist false sharing */
69750 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69751 - L1_CACHE_BYTES), GFP_KERNEL);
69752 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69753 if (!sbinfo)
69754 return -ENOMEM;
69755
69756 diff -urNp linux-3.0.7/mm/slab.c linux-3.0.7/mm/slab.c
69757 --- linux-3.0.7/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
69758 +++ linux-3.0.7/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
69759 @@ -151,7 +151,7 @@
69760
69761 /* Legal flag mask for kmem_cache_create(). */
69762 #if DEBUG
69763 -# define CREATE_MASK (SLAB_RED_ZONE | \
69764 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69765 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69766 SLAB_CACHE_DMA | \
69767 SLAB_STORE_USER | \
69768 @@ -159,7 +159,7 @@
69769 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69770 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69771 #else
69772 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69773 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69774 SLAB_CACHE_DMA | \
69775 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69776 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69777 @@ -288,7 +288,7 @@ struct kmem_list3 {
69778 * Need this for bootstrapping a per node allocator.
69779 */
69780 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69781 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69782 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69783 #define CACHE_CACHE 0
69784 #define SIZE_AC MAX_NUMNODES
69785 #define SIZE_L3 (2 * MAX_NUMNODES)
69786 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
69787 if ((x)->max_freeable < i) \
69788 (x)->max_freeable = i; \
69789 } while (0)
69790 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69791 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69792 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69793 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69794 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69795 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69796 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69797 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69798 #else
69799 #define STATS_INC_ACTIVE(x) do { } while (0)
69800 #define STATS_DEC_ACTIVE(x) do { } while (0)
69801 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
69802 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69803 */
69804 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69805 - const struct slab *slab, void *obj)
69806 + const struct slab *slab, const void *obj)
69807 {
69808 u32 offset = (obj - slab->s_mem);
69809 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69810 @@ -564,7 +564,7 @@ struct cache_names {
69811 static struct cache_names __initdata cache_names[] = {
69812 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
69813 #include <linux/kmalloc_sizes.h>
69814 - {NULL,}
69815 + {NULL}
69816 #undef CACHE
69817 };
69818
69819 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
69820 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69821 sizes[INDEX_AC].cs_size,
69822 ARCH_KMALLOC_MINALIGN,
69823 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69824 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69825 NULL);
69826
69827 if (INDEX_AC != INDEX_L3) {
69828 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
69829 kmem_cache_create(names[INDEX_L3].name,
69830 sizes[INDEX_L3].cs_size,
69831 ARCH_KMALLOC_MINALIGN,
69832 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69833 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69834 NULL);
69835 }
69836
69837 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
69838 sizes->cs_cachep = kmem_cache_create(names->name,
69839 sizes->cs_size,
69840 ARCH_KMALLOC_MINALIGN,
69841 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69842 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69843 NULL);
69844 }
69845 #ifdef CONFIG_ZONE_DMA
69846 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
69847 }
69848 /* cpu stats */
69849 {
69850 - unsigned long allochit = atomic_read(&cachep->allochit);
69851 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69852 - unsigned long freehit = atomic_read(&cachep->freehit);
69853 - unsigned long freemiss = atomic_read(&cachep->freemiss);
69854 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69855 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69856 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69857 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69858
69859 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69860 allochit, allocmiss, freehit, freemiss);
69861 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
69862
69863 static int __init slab_proc_init(void)
69864 {
69865 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69866 + mode_t gr_mode = S_IRUGO;
69867 +
69868 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69869 + gr_mode = S_IRUSR;
69870 +#endif
69871 +
69872 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69873 #ifdef CONFIG_DEBUG_SLAB_LEAK
69874 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69875 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69876 #endif
69877 return 0;
69878 }
69879 module_init(slab_proc_init);
69880 #endif
69881
69882 +void check_object_size(const void *ptr, unsigned long n, bool to)
69883 +{
69884 +
69885 +#ifdef CONFIG_PAX_USERCOPY
69886 + struct page *page;
69887 + struct kmem_cache *cachep = NULL;
69888 + struct slab *slabp;
69889 + unsigned int objnr;
69890 + unsigned long offset;
69891 +
69892 + if (!n)
69893 + return;
69894 +
69895 + if (ZERO_OR_NULL_PTR(ptr))
69896 + goto report;
69897 +
69898 + if (!virt_addr_valid(ptr))
69899 + return;
69900 +
69901 + page = virt_to_head_page(ptr);
69902 +
69903 + if (!PageSlab(page)) {
69904 + if (object_is_on_stack(ptr, n) == -1)
69905 + goto report;
69906 + return;
69907 + }
69908 +
69909 + cachep = page_get_cache(page);
69910 + if (!(cachep->flags & SLAB_USERCOPY))
69911 + goto report;
69912 +
69913 + slabp = page_get_slab(page);
69914 + objnr = obj_to_index(cachep, slabp, ptr);
69915 + BUG_ON(objnr >= cachep->num);
69916 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69917 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69918 + return;
69919 +
69920 +report:
69921 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69922 +#endif
69923 +
69924 +}
69925 +EXPORT_SYMBOL(check_object_size);
69926 +
69927 /**
69928 * ksize - get the actual amount of memory allocated for a given object
69929 * @objp: Pointer to the object
69930 diff -urNp linux-3.0.7/mm/slob.c linux-3.0.7/mm/slob.c
69931 --- linux-3.0.7/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
69932 +++ linux-3.0.7/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
69933 @@ -29,7 +29,7 @@
69934 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69935 * alloc_pages() directly, allocating compound pages so the page order
69936 * does not have to be separately tracked, and also stores the exact
69937 - * allocation size in page->private so that it can be used to accurately
69938 + * allocation size in slob_page->size so that it can be used to accurately
69939 * provide ksize(). These objects are detected in kfree() because slob_page()
69940 * is false for them.
69941 *
69942 @@ -58,6 +58,7 @@
69943 */
69944
69945 #include <linux/kernel.h>
69946 +#include <linux/sched.h>
69947 #include <linux/slab.h>
69948 #include <linux/mm.h>
69949 #include <linux/swap.h> /* struct reclaim_state */
69950 @@ -102,7 +103,8 @@ struct slob_page {
69951 unsigned long flags; /* mandatory */
69952 atomic_t _count; /* mandatory */
69953 slobidx_t units; /* free units left in page */
69954 - unsigned long pad[2];
69955 + unsigned long pad[1];
69956 + unsigned long size; /* size when >=PAGE_SIZE */
69957 slob_t *free; /* first free slob_t in page */
69958 struct list_head list; /* linked list of free pages */
69959 };
69960 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
69961 */
69962 static inline int is_slob_page(struct slob_page *sp)
69963 {
69964 - return PageSlab((struct page *)sp);
69965 + return PageSlab((struct page *)sp) && !sp->size;
69966 }
69967
69968 static inline void set_slob_page(struct slob_page *sp)
69969 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
69970
69971 static inline struct slob_page *slob_page(const void *addr)
69972 {
69973 - return (struct slob_page *)virt_to_page(addr);
69974 + return (struct slob_page *)virt_to_head_page(addr);
69975 }
69976
69977 /*
69978 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
69979 /*
69980 * Return the size of a slob block.
69981 */
69982 -static slobidx_t slob_units(slob_t *s)
69983 +static slobidx_t slob_units(const slob_t *s)
69984 {
69985 if (s->units > 0)
69986 return s->units;
69987 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
69988 /*
69989 * Return the next free slob block pointer after this one.
69990 */
69991 -static slob_t *slob_next(slob_t *s)
69992 +static slob_t *slob_next(const slob_t *s)
69993 {
69994 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69995 slobidx_t next;
69996 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
69997 /*
69998 * Returns true if s is the last free block in its page.
69999 */
70000 -static int slob_last(slob_t *s)
70001 +static int slob_last(const slob_t *s)
70002 {
70003 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70004 }
70005 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70006 if (!page)
70007 return NULL;
70008
70009 + set_slob_page(page);
70010 return page_address(page);
70011 }
70012
70013 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70014 if (!b)
70015 return NULL;
70016 sp = slob_page(b);
70017 - set_slob_page(sp);
70018
70019 spin_lock_irqsave(&slob_lock, flags);
70020 sp->units = SLOB_UNITS(PAGE_SIZE);
70021 sp->free = b;
70022 + sp->size = 0;
70023 INIT_LIST_HEAD(&sp->list);
70024 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70025 set_slob_page_free(sp, slob_list);
70026 @@ -476,10 +479,9 @@ out:
70027 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70028 */
70029
70030 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70031 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70032 {
70033 - unsigned int *m;
70034 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70035 + slob_t *m;
70036 void *ret;
70037
70038 lockdep_trace_alloc(gfp);
70039 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
70040
70041 if (!m)
70042 return NULL;
70043 - *m = size;
70044 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70045 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70046 + m[0].units = size;
70047 + m[1].units = align;
70048 ret = (void *)m + align;
70049
70050 trace_kmalloc_node(_RET_IP_, ret,
70051 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
70052 gfp |= __GFP_COMP;
70053 ret = slob_new_pages(gfp, order, node);
70054 if (ret) {
70055 - struct page *page;
70056 - page = virt_to_page(ret);
70057 - page->private = size;
70058 + struct slob_page *sp;
70059 + sp = slob_page(ret);
70060 + sp->size = size;
70061 }
70062
70063 trace_kmalloc_node(_RET_IP_, ret,
70064 size, PAGE_SIZE << order, gfp, node);
70065 }
70066
70067 - kmemleak_alloc(ret, size, 1, gfp);
70068 + return ret;
70069 +}
70070 +
70071 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70072 +{
70073 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70074 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70075 +
70076 + if (!ZERO_OR_NULL_PTR(ret))
70077 + kmemleak_alloc(ret, size, 1, gfp);
70078 return ret;
70079 }
70080 EXPORT_SYMBOL(__kmalloc_node);
70081 @@ -531,13 +545,88 @@ void kfree(const void *block)
70082 sp = slob_page(block);
70083 if (is_slob_page(sp)) {
70084 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70085 - unsigned int *m = (unsigned int *)(block - align);
70086 - slob_free(m, *m + align);
70087 - } else
70088 + slob_t *m = (slob_t *)(block - align);
70089 + slob_free(m, m[0].units + align);
70090 + } else {
70091 + clear_slob_page(sp);
70092 + free_slob_page(sp);
70093 + sp->size = 0;
70094 put_page(&sp->page);
70095 + }
70096 }
70097 EXPORT_SYMBOL(kfree);
70098
70099 +void check_object_size(const void *ptr, unsigned long n, bool to)
70100 +{
70101 +
70102 +#ifdef CONFIG_PAX_USERCOPY
70103 + struct slob_page *sp;
70104 + const slob_t *free;
70105 + const void *base;
70106 + unsigned long flags;
70107 +
70108 + if (!n)
70109 + return;
70110 +
70111 + if (ZERO_OR_NULL_PTR(ptr))
70112 + goto report;
70113 +
70114 + if (!virt_addr_valid(ptr))
70115 + return;
70116 +
70117 + sp = slob_page(ptr);
70118 + if (!PageSlab((struct page*)sp)) {
70119 + if (object_is_on_stack(ptr, n) == -1)
70120 + goto report;
70121 + return;
70122 + }
70123 +
70124 + if (sp->size) {
70125 + base = page_address(&sp->page);
70126 + if (base <= ptr && n <= sp->size - (ptr - base))
70127 + return;
70128 + goto report;
70129 + }
70130 +
70131 + /* some tricky double walking to find the chunk */
70132 + spin_lock_irqsave(&slob_lock, flags);
70133 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70134 + free = sp->free;
70135 +
70136 + while (!slob_last(free) && (void *)free <= ptr) {
70137 + base = free + slob_units(free);
70138 + free = slob_next(free);
70139 + }
70140 +
70141 + while (base < (void *)free) {
70142 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70143 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70144 + int offset;
70145 +
70146 + if (ptr < base + align)
70147 + break;
70148 +
70149 + offset = ptr - base - align;
70150 + if (offset >= m) {
70151 + base += size;
70152 + continue;
70153 + }
70154 +
70155 + if (n > m - offset)
70156 + break;
70157 +
70158 + spin_unlock_irqrestore(&slob_lock, flags);
70159 + return;
70160 + }
70161 +
70162 + spin_unlock_irqrestore(&slob_lock, flags);
70163 +report:
70164 + pax_report_usercopy(ptr, n, to, NULL);
70165 +#endif
70166 +
70167 +}
70168 +EXPORT_SYMBOL(check_object_size);
70169 +
70170 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70171 size_t ksize(const void *block)
70172 {
70173 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
70174 sp = slob_page(block);
70175 if (is_slob_page(sp)) {
70176 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70177 - unsigned int *m = (unsigned int *)(block - align);
70178 - return SLOB_UNITS(*m) * SLOB_UNIT;
70179 + slob_t *m = (slob_t *)(block - align);
70180 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70181 } else
70182 - return sp->page.private;
70183 + return sp->size;
70184 }
70185 EXPORT_SYMBOL(ksize);
70186
70187 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
70188 {
70189 struct kmem_cache *c;
70190
70191 +#ifdef CONFIG_PAX_USERCOPY
70192 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70193 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70194 +#else
70195 c = slob_alloc(sizeof(struct kmem_cache),
70196 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70197 +#endif
70198
70199 if (c) {
70200 c->name = name;
70201 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
70202 {
70203 void *b;
70204
70205 +#ifdef CONFIG_PAX_USERCOPY
70206 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70207 +#else
70208 if (c->size < PAGE_SIZE) {
70209 b = slob_alloc(c->size, flags, c->align, node);
70210 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70211 SLOB_UNITS(c->size) * SLOB_UNIT,
70212 flags, node);
70213 } else {
70214 + struct slob_page *sp;
70215 +
70216 b = slob_new_pages(flags, get_order(c->size), node);
70217 + sp = slob_page(b);
70218 + sp->size = c->size;
70219 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70220 PAGE_SIZE << get_order(c->size),
70221 flags, node);
70222 }
70223 +#endif
70224
70225 if (c->ctor)
70226 c->ctor(b);
70227 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70228
70229 static void __kmem_cache_free(void *b, int size)
70230 {
70231 - if (size < PAGE_SIZE)
70232 + struct slob_page *sp = slob_page(b);
70233 +
70234 + if (is_slob_page(sp))
70235 slob_free(b, size);
70236 - else
70237 + else {
70238 + clear_slob_page(sp);
70239 + free_slob_page(sp);
70240 + sp->size = 0;
70241 slob_free_pages(b, get_order(size));
70242 + }
70243 }
70244
70245 static void kmem_rcu_free(struct rcu_head *head)
70246 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
70247
70248 void kmem_cache_free(struct kmem_cache *c, void *b)
70249 {
70250 + int size = c->size;
70251 +
70252 +#ifdef CONFIG_PAX_USERCOPY
70253 + if (size + c->align < PAGE_SIZE) {
70254 + size += c->align;
70255 + b -= c->align;
70256 + }
70257 +#endif
70258 +
70259 kmemleak_free_recursive(b, c->flags);
70260 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70261 struct slob_rcu *slob_rcu;
70262 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70263 - slob_rcu->size = c->size;
70264 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70265 + slob_rcu->size = size;
70266 call_rcu(&slob_rcu->head, kmem_rcu_free);
70267 } else {
70268 - __kmem_cache_free(b, c->size);
70269 + __kmem_cache_free(b, size);
70270 }
70271
70272 +#ifdef CONFIG_PAX_USERCOPY
70273 + trace_kfree(_RET_IP_, b);
70274 +#else
70275 trace_kmem_cache_free(_RET_IP_, b);
70276 +#endif
70277 +
70278 }
70279 EXPORT_SYMBOL(kmem_cache_free);
70280
70281 diff -urNp linux-3.0.7/mm/slub.c linux-3.0.7/mm/slub.c
70282 --- linux-3.0.7/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
70283 +++ linux-3.0.7/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
70284 @@ -200,7 +200,7 @@ struct track {
70285
70286 enum track_item { TRACK_ALLOC, TRACK_FREE };
70287
70288 -#ifdef CONFIG_SYSFS
70289 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70290 static int sysfs_slab_add(struct kmem_cache *);
70291 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70292 static void sysfs_slab_remove(struct kmem_cache *);
70293 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
70294 if (!t->addr)
70295 return;
70296
70297 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70298 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70299 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70300 }
70301
70302 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
70303
70304 page = virt_to_head_page(x);
70305
70306 + BUG_ON(!PageSlab(page));
70307 +
70308 slab_free(s, page, x, _RET_IP_);
70309
70310 trace_kmem_cache_free(_RET_IP_, x);
70311 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
70312 * Merge control. If this is set then no merging of slab caches will occur.
70313 * (Could be removed. This was introduced to pacify the merge skeptics.)
70314 */
70315 -static int slub_nomerge;
70316 +static int slub_nomerge = 1;
70317
70318 /*
70319 * Calculate the order of allocation given an slab object size.
70320 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
70321 * list to avoid pounding the page allocator excessively.
70322 */
70323 set_min_partial(s, ilog2(s->size));
70324 - s->refcount = 1;
70325 + atomic_set(&s->refcount, 1);
70326 #ifdef CONFIG_NUMA
70327 s->remote_node_defrag_ratio = 1000;
70328 #endif
70329 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
70330 void kmem_cache_destroy(struct kmem_cache *s)
70331 {
70332 down_write(&slub_lock);
70333 - s->refcount--;
70334 - if (!s->refcount) {
70335 + if (atomic_dec_and_test(&s->refcount)) {
70336 list_del(&s->list);
70337 if (kmem_cache_close(s)) {
70338 printk(KERN_ERR "SLUB %s: %s called for cache that "
70339 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
70340 EXPORT_SYMBOL(__kmalloc_node);
70341 #endif
70342
70343 +void check_object_size(const void *ptr, unsigned long n, bool to)
70344 +{
70345 +
70346 +#ifdef CONFIG_PAX_USERCOPY
70347 + struct page *page;
70348 + struct kmem_cache *s = NULL;
70349 + unsigned long offset;
70350 +
70351 + if (!n)
70352 + return;
70353 +
70354 + if (ZERO_OR_NULL_PTR(ptr))
70355 + goto report;
70356 +
70357 + if (!virt_addr_valid(ptr))
70358 + return;
70359 +
70360 + page = virt_to_head_page(ptr);
70361 +
70362 + if (!PageSlab(page)) {
70363 + if (object_is_on_stack(ptr, n) == -1)
70364 + goto report;
70365 + return;
70366 + }
70367 +
70368 + s = page->slab;
70369 + if (!(s->flags & SLAB_USERCOPY))
70370 + goto report;
70371 +
70372 + offset = (ptr - page_address(page)) % s->size;
70373 + if (offset <= s->objsize && n <= s->objsize - offset)
70374 + return;
70375 +
70376 +report:
70377 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70378 +#endif
70379 +
70380 +}
70381 +EXPORT_SYMBOL(check_object_size);
70382 +
70383 size_t ksize(const void *object)
70384 {
70385 struct page *page;
70386 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
70387 int node;
70388
70389 list_add(&s->list, &slab_caches);
70390 - s->refcount = -1;
70391 + atomic_set(&s->refcount, -1);
70392
70393 for_each_node_state(node, N_NORMAL_MEMORY) {
70394 struct kmem_cache_node *n = get_node(s, node);
70395 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
70396
70397 /* Caches that are not of the two-to-the-power-of size */
70398 if (KMALLOC_MIN_SIZE <= 32) {
70399 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70400 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70401 caches++;
70402 }
70403
70404 if (KMALLOC_MIN_SIZE <= 64) {
70405 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70406 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70407 caches++;
70408 }
70409
70410 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70411 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70412 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70413 caches++;
70414 }
70415
70416 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
70417 /*
70418 * We may have set a slab to be unmergeable during bootstrap.
70419 */
70420 - if (s->refcount < 0)
70421 + if (atomic_read(&s->refcount) < 0)
70422 return 1;
70423
70424 return 0;
70425 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
70426 down_write(&slub_lock);
70427 s = find_mergeable(size, align, flags, name, ctor);
70428 if (s) {
70429 - s->refcount++;
70430 + atomic_inc(&s->refcount);
70431 /*
70432 * Adjust the object sizes so that we clear
70433 * the complete object on kzalloc.
70434 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
70435 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70436
70437 if (sysfs_slab_alias(s, name)) {
70438 - s->refcount--;
70439 + atomic_dec(&s->refcount);
70440 goto err;
70441 }
70442 up_write(&slub_lock);
70443 @@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
70444 }
70445 #endif
70446
70447 -#ifdef CONFIG_SYSFS
70448 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70449 static int count_inuse(struct page *page)
70450 {
70451 return page->inuse;
70452 @@ -3935,12 +3976,12 @@ static void resiliency_test(void)
70453 validate_slab_cache(kmalloc_caches[9]);
70454 }
70455 #else
70456 -#ifdef CONFIG_SYSFS
70457 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70458 static void resiliency_test(void) {};
70459 #endif
70460 #endif
70461
70462 -#ifdef CONFIG_SYSFS
70463 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70464 enum slab_stat_type {
70465 SL_ALL, /* All slabs */
70466 SL_PARTIAL, /* Only partially allocated slabs */
70467 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
70468
70469 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70470 {
70471 - return sprintf(buf, "%d\n", s->refcount - 1);
70472 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70473 }
70474 SLAB_ATTR_RO(aliases);
70475
70476 @@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
70477 return name;
70478 }
70479
70480 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70481 static int sysfs_slab_add(struct kmem_cache *s)
70482 {
70483 int err;
70484 @@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
70485 kobject_del(&s->kobj);
70486 kobject_put(&s->kobj);
70487 }
70488 +#endif
70489
70490 /*
70491 * Need to buffer aliases during bootup until sysfs becomes
70492 @@ -4737,6 +4780,7 @@ struct saved_alias {
70493
70494 static struct saved_alias *alias_list;
70495
70496 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70497 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70498 {
70499 struct saved_alias *al;
70500 @@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
70501 alias_list = al;
70502 return 0;
70503 }
70504 +#endif
70505
70506 static int __init slab_sysfs_init(void)
70507 {
70508 @@ -4894,7 +4939,13 @@ static const struct file_operations proc
70509
70510 static int __init slab_proc_init(void)
70511 {
70512 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70513 + mode_t gr_mode = S_IRUGO;
70514 +
70515 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70516 + gr_mode = S_IRUSR;
70517 +#endif
70518 +
70519 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70520 return 0;
70521 }
70522 module_init(slab_proc_init);
70523 diff -urNp linux-3.0.7/mm/swap.c linux-3.0.7/mm/swap.c
70524 --- linux-3.0.7/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
70525 +++ linux-3.0.7/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
70526 @@ -31,6 +31,7 @@
70527 #include <linux/backing-dev.h>
70528 #include <linux/memcontrol.h>
70529 #include <linux/gfp.h>
70530 +#include <linux/hugetlb.h>
70531
70532 #include "internal.h"
70533
70534 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70535
70536 __page_cache_release(page);
70537 dtor = get_compound_page_dtor(page);
70538 + if (!PageHuge(page))
70539 + BUG_ON(dtor != free_compound_page);
70540 (*dtor)(page);
70541 }
70542
70543 diff -urNp linux-3.0.7/mm/swapfile.c linux-3.0.7/mm/swapfile.c
70544 --- linux-3.0.7/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
70545 +++ linux-3.0.7/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
70546 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70547
70548 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70549 /* Activity counter to indicate that a swapon or swapoff has occurred */
70550 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
70551 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70552
70553 static inline unsigned char swap_count(unsigned char ent)
70554 {
70555 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70556 }
70557 filp_close(swap_file, NULL);
70558 err = 0;
70559 - atomic_inc(&proc_poll_event);
70560 + atomic_inc_unchecked(&proc_poll_event);
70561 wake_up_interruptible(&proc_poll_wait);
70562
70563 out_dput:
70564 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
70565
70566 poll_wait(file, &proc_poll_wait, wait);
70567
70568 - if (s->event != atomic_read(&proc_poll_event)) {
70569 - s->event = atomic_read(&proc_poll_event);
70570 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
70571 + s->event = atomic_read_unchecked(&proc_poll_event);
70572 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70573 }
70574
70575 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
70576 }
70577
70578 s->seq.private = s;
70579 - s->event = atomic_read(&proc_poll_event);
70580 + s->event = atomic_read_unchecked(&proc_poll_event);
70581 return ret;
70582 }
70583
70584 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
70585 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70586
70587 mutex_unlock(&swapon_mutex);
70588 - atomic_inc(&proc_poll_event);
70589 + atomic_inc_unchecked(&proc_poll_event);
70590 wake_up_interruptible(&proc_poll_wait);
70591
70592 if (S_ISREG(inode->i_mode))
70593 diff -urNp linux-3.0.7/mm/util.c linux-3.0.7/mm/util.c
70594 --- linux-3.0.7/mm/util.c 2011-07-21 22:17:23.000000000 -0400
70595 +++ linux-3.0.7/mm/util.c 2011-08-23 21:47:56.000000000 -0400
70596 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70597 * allocated buffer. Use this if you don't want to free the buffer immediately
70598 * like, for example, with RCU.
70599 */
70600 +#undef __krealloc
70601 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70602 {
70603 void *ret;
70604 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70605 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70606 * %NULL pointer, the object pointed to is freed.
70607 */
70608 +#undef krealloc
70609 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70610 {
70611 void *ret;
70612 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
70613 void arch_pick_mmap_layout(struct mm_struct *mm)
70614 {
70615 mm->mmap_base = TASK_UNMAPPED_BASE;
70616 +
70617 +#ifdef CONFIG_PAX_RANDMMAP
70618 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70619 + mm->mmap_base += mm->delta_mmap;
70620 +#endif
70621 +
70622 mm->get_unmapped_area = arch_get_unmapped_area;
70623 mm->unmap_area = arch_unmap_area;
70624 }
70625 diff -urNp linux-3.0.7/mm/vmalloc.c linux-3.0.7/mm/vmalloc.c
70626 --- linux-3.0.7/mm/vmalloc.c 2011-10-16 21:54:54.000000000 -0400
70627 +++ linux-3.0.7/mm/vmalloc.c 2011-10-16 21:55:28.000000000 -0400
70628 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70629
70630 pte = pte_offset_kernel(pmd, addr);
70631 do {
70632 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70633 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70634 +
70635 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70636 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70637 + BUG_ON(!pte_exec(*pte));
70638 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70639 + continue;
70640 + }
70641 +#endif
70642 +
70643 + {
70644 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70645 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70646 + }
70647 } while (pte++, addr += PAGE_SIZE, addr != end);
70648 }
70649
70650 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70651 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70652 {
70653 pte_t *pte;
70654 + int ret = -ENOMEM;
70655
70656 /*
70657 * nr is a running index into the array which helps higher level
70658 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
70659 pte = pte_alloc_kernel(pmd, addr);
70660 if (!pte)
70661 return -ENOMEM;
70662 +
70663 + pax_open_kernel();
70664 do {
70665 struct page *page = pages[*nr];
70666
70667 - if (WARN_ON(!pte_none(*pte)))
70668 - return -EBUSY;
70669 - if (WARN_ON(!page))
70670 - return -ENOMEM;
70671 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70672 + if (pgprot_val(prot) & _PAGE_NX)
70673 +#endif
70674 +
70675 + if (WARN_ON(!pte_none(*pte))) {
70676 + ret = -EBUSY;
70677 + goto out;
70678 + }
70679 + if (WARN_ON(!page)) {
70680 + ret = -ENOMEM;
70681 + goto out;
70682 + }
70683 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70684 (*nr)++;
70685 } while (pte++, addr += PAGE_SIZE, addr != end);
70686 - return 0;
70687 + ret = 0;
70688 +out:
70689 + pax_close_kernel();
70690 + return ret;
70691 }
70692
70693 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70694 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
70695 * and fall back on vmalloc() if that fails. Others
70696 * just put it in the vmalloc space.
70697 */
70698 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70699 +#ifdef CONFIG_MODULES
70700 +#ifdef MODULES_VADDR
70701 unsigned long addr = (unsigned long)x;
70702 if (addr >= MODULES_VADDR && addr < MODULES_END)
70703 return 1;
70704 #endif
70705 +
70706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70707 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70708 + return 1;
70709 +#endif
70710 +
70711 +#endif
70712 +
70713 return is_vmalloc_addr(x);
70714 }
70715
70716 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
70717
70718 if (!pgd_none(*pgd)) {
70719 pud_t *pud = pud_offset(pgd, addr);
70720 +#ifdef CONFIG_X86
70721 + if (!pud_large(*pud))
70722 +#endif
70723 if (!pud_none(*pud)) {
70724 pmd_t *pmd = pmd_offset(pud, addr);
70725 +#ifdef CONFIG_X86
70726 + if (!pmd_large(*pmd))
70727 +#endif
70728 if (!pmd_none(*pmd)) {
70729 pte_t *ptep, pte;
70730
70731 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
70732 struct vm_struct *area;
70733
70734 BUG_ON(in_interrupt());
70735 +
70736 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70737 + if (flags & VM_KERNEXEC) {
70738 + if (start != VMALLOC_START || end != VMALLOC_END)
70739 + return NULL;
70740 + start = (unsigned long)MODULES_EXEC_VADDR;
70741 + end = (unsigned long)MODULES_EXEC_END;
70742 + }
70743 +#endif
70744 +
70745 if (flags & VM_IOREMAP) {
70746 int bit = fls(size);
70747
70748 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
70749 if (count > totalram_pages)
70750 return NULL;
70751
70752 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70753 + if (!(pgprot_val(prot) & _PAGE_NX))
70754 + flags |= VM_KERNEXEC;
70755 +#endif
70756 +
70757 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70758 __builtin_return_address(0));
70759 if (!area)
70760 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
70761 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70762 return NULL;
70763
70764 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70765 + if (!(pgprot_val(prot) & _PAGE_NX))
70766 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70767 + node, gfp_mask, caller);
70768 + else
70769 +#endif
70770 +
70771 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
70772 gfp_mask, caller);
70773
70774 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
70775 gfp_mask, prot, node, caller);
70776 }
70777
70778 +#undef __vmalloc
70779 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70780 {
70781 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70782 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
70783 * For tight control over page level allocator and protection flags
70784 * use __vmalloc() instead.
70785 */
70786 +#undef vmalloc
70787 void *vmalloc(unsigned long size)
70788 {
70789 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
70790 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
70791 * For tight control over page level allocator and protection flags
70792 * use __vmalloc() instead.
70793 */
70794 +#undef vzalloc
70795 void *vzalloc(unsigned long size)
70796 {
70797 return __vmalloc_node_flags(size, -1,
70798 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
70799 * The resulting memory area is zeroed so it can be mapped to userspace
70800 * without leaking data.
70801 */
70802 +#undef vmalloc_user
70803 void *vmalloc_user(unsigned long size)
70804 {
70805 struct vm_struct *area;
70806 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
70807 * For tight control over page level allocator and protection flags
70808 * use __vmalloc() instead.
70809 */
70810 +#undef vmalloc_node
70811 void *vmalloc_node(unsigned long size, int node)
70812 {
70813 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70814 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
70815 * For tight control over page level allocator and protection flags
70816 * use __vmalloc_node() instead.
70817 */
70818 +#undef vzalloc_node
70819 void *vzalloc_node(unsigned long size, int node)
70820 {
70821 return __vmalloc_node_flags(size, node,
70822 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
70823 * For tight control over page level allocator and protection flags
70824 * use __vmalloc() instead.
70825 */
70826 -
70827 +#undef vmalloc_exec
70828 void *vmalloc_exec(unsigned long size)
70829 {
70830 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70831 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70832 -1, __builtin_return_address(0));
70833 }
70834
70835 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
70836 * Allocate enough 32bit PA addressable pages to cover @size from the
70837 * page level allocator and map them into contiguous kernel virtual space.
70838 */
70839 +#undef vmalloc_32
70840 void *vmalloc_32(unsigned long size)
70841 {
70842 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70843 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
70844 * The resulting memory area is 32bit addressable and zeroed so it can be
70845 * mapped to userspace without leaking data.
70846 */
70847 +#undef vmalloc_32_user
70848 void *vmalloc_32_user(unsigned long size)
70849 {
70850 struct vm_struct *area;
70851 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
70852 unsigned long uaddr = vma->vm_start;
70853 unsigned long usize = vma->vm_end - vma->vm_start;
70854
70855 + BUG_ON(vma->vm_mirror);
70856 +
70857 if ((PAGE_SIZE-1) & (unsigned long)addr)
70858 return -EINVAL;
70859
70860 diff -urNp linux-3.0.7/mm/vmstat.c linux-3.0.7/mm/vmstat.c
70861 --- linux-3.0.7/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
70862 +++ linux-3.0.7/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
70863 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
70864 *
70865 * vm_stat contains the global counters
70866 */
70867 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70868 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70869 EXPORT_SYMBOL(vm_stat);
70870
70871 #ifdef CONFIG_SMP
70872 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
70873 v = p->vm_stat_diff[i];
70874 p->vm_stat_diff[i] = 0;
70875 local_irq_restore(flags);
70876 - atomic_long_add(v, &zone->vm_stat[i]);
70877 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70878 global_diff[i] += v;
70879 #ifdef CONFIG_NUMA
70880 /* 3 seconds idle till flush */
70881 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
70882
70883 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70884 if (global_diff[i])
70885 - atomic_long_add(global_diff[i], &vm_stat[i]);
70886 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70887 }
70888
70889 #endif
70890 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
70891 start_cpu_timer(cpu);
70892 #endif
70893 #ifdef CONFIG_PROC_FS
70894 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70895 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70896 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70897 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70898 + {
70899 + mode_t gr_mode = S_IRUGO;
70900 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70901 + gr_mode = S_IRUSR;
70902 +#endif
70903 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70904 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70905 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70906 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70907 +#else
70908 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70909 +#endif
70910 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70911 + }
70912 #endif
70913 return 0;
70914 }
70915 diff -urNp linux-3.0.7/net/8021q/vlan.c linux-3.0.7/net/8021q/vlan.c
70916 --- linux-3.0.7/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
70917 +++ linux-3.0.7/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
70918 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
70919 err = -EPERM;
70920 if (!capable(CAP_NET_ADMIN))
70921 break;
70922 - if ((args.u.name_type >= 0) &&
70923 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70924 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70925 struct vlan_net *vn;
70926
70927 vn = net_generic(net, vlan_net_id);
70928 diff -urNp linux-3.0.7/net/9p/trans_fd.c linux-3.0.7/net/9p/trans_fd.c
70929 --- linux-3.0.7/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
70930 +++ linux-3.0.7/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
70931 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
70932 oldfs = get_fs();
70933 set_fs(get_ds());
70934 /* The cast to a user pointer is valid due to the set_fs() */
70935 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
70936 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
70937 set_fs(oldfs);
70938
70939 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
70940 diff -urNp linux-3.0.7/net/9p/trans_virtio.c linux-3.0.7/net/9p/trans_virtio.c
70941 --- linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:54:54.000000000 -0400
70942 +++ linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:55:28.000000000 -0400
70943 @@ -327,7 +327,7 @@ req_retry_pinned:
70944 } else {
70945 char *pbuf;
70946 if (req->tc->pubuf)
70947 - pbuf = (__force char *) req->tc->pubuf;
70948 + pbuf = (char __force_kernel *) req->tc->pubuf;
70949 else
70950 pbuf = req->tc->pkbuf;
70951 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
70952 @@ -357,7 +357,7 @@ req_retry_pinned:
70953 } else {
70954 char *pbuf;
70955 if (req->tc->pubuf)
70956 - pbuf = (__force char *) req->tc->pubuf;
70957 + pbuf = (char __force_kernel *) req->tc->pubuf;
70958 else
70959 pbuf = req->tc->pkbuf;
70960
70961 diff -urNp linux-3.0.7/net/atm/atm_misc.c linux-3.0.7/net/atm/atm_misc.c
70962 --- linux-3.0.7/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
70963 +++ linux-3.0.7/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
70964 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
70965 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70966 return 1;
70967 atm_return(vcc, truesize);
70968 - atomic_inc(&vcc->stats->rx_drop);
70969 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70970 return 0;
70971 }
70972 EXPORT_SYMBOL(atm_charge);
70973 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
70974 }
70975 }
70976 atm_return(vcc, guess);
70977 - atomic_inc(&vcc->stats->rx_drop);
70978 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70979 return NULL;
70980 }
70981 EXPORT_SYMBOL(atm_alloc_charge);
70982 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
70983
70984 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
70985 {
70986 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70987 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70988 __SONET_ITEMS
70989 #undef __HANDLE_ITEM
70990 }
70991 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
70992
70993 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
70994 {
70995 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70996 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70997 __SONET_ITEMS
70998 #undef __HANDLE_ITEM
70999 }
71000 diff -urNp linux-3.0.7/net/atm/lec.h linux-3.0.7/net/atm/lec.h
71001 --- linux-3.0.7/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
71002 +++ linux-3.0.7/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
71003 @@ -48,7 +48,7 @@ struct lane2_ops {
71004 const u8 *tlvs, u32 sizeoftlvs);
71005 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71006 const u8 *tlvs, u32 sizeoftlvs);
71007 -};
71008 +} __no_const;
71009
71010 /*
71011 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71012 diff -urNp linux-3.0.7/net/atm/mpc.h linux-3.0.7/net/atm/mpc.h
71013 --- linux-3.0.7/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
71014 +++ linux-3.0.7/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
71015 @@ -33,7 +33,7 @@ struct mpoa_client {
71016 struct mpc_parameters parameters; /* parameters for this client */
71017
71018 const struct net_device_ops *old_ops;
71019 - struct net_device_ops new_ops;
71020 + net_device_ops_no_const new_ops;
71021 };
71022
71023
71024 diff -urNp linux-3.0.7/net/atm/mpoa_caches.c linux-3.0.7/net/atm/mpoa_caches.c
71025 --- linux-3.0.7/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
71026 +++ linux-3.0.7/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
71027 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71028 struct timeval now;
71029 struct k_message msg;
71030
71031 + pax_track_stack();
71032 +
71033 do_gettimeofday(&now);
71034
71035 read_lock_bh(&client->ingress_lock);
71036 diff -urNp linux-3.0.7/net/atm/proc.c linux-3.0.7/net/atm/proc.c
71037 --- linux-3.0.7/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
71038 +++ linux-3.0.7/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
71039 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71040 const struct k_atm_aal_stats *stats)
71041 {
71042 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71043 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71044 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71045 - atomic_read(&stats->rx_drop));
71046 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71047 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71048 + atomic_read_unchecked(&stats->rx_drop));
71049 }
71050
71051 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71052 diff -urNp linux-3.0.7/net/atm/resources.c linux-3.0.7/net/atm/resources.c
71053 --- linux-3.0.7/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
71054 +++ linux-3.0.7/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
71055 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71056 static void copy_aal_stats(struct k_atm_aal_stats *from,
71057 struct atm_aal_stats *to)
71058 {
71059 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71060 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71061 __AAL_STAT_ITEMS
71062 #undef __HANDLE_ITEM
71063 }
71064 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71065 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71066 struct atm_aal_stats *to)
71067 {
71068 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71069 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71070 __AAL_STAT_ITEMS
71071 #undef __HANDLE_ITEM
71072 }
71073 diff -urNp linux-3.0.7/net/batman-adv/hard-interface.c linux-3.0.7/net/batman-adv/hard-interface.c
71074 --- linux-3.0.7/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
71075 +++ linux-3.0.7/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
71076 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
71077 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71078 dev_add_pack(&hard_iface->batman_adv_ptype);
71079
71080 - atomic_set(&hard_iface->seqno, 1);
71081 - atomic_set(&hard_iface->frag_seqno, 1);
71082 + atomic_set_unchecked(&hard_iface->seqno, 1);
71083 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71084 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71085 hard_iface->net_dev->name);
71086
71087 diff -urNp linux-3.0.7/net/batman-adv/routing.c linux-3.0.7/net/batman-adv/routing.c
71088 --- linux-3.0.7/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
71089 +++ linux-3.0.7/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
71090 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
71091 return;
71092
71093 /* could be changed by schedule_own_packet() */
71094 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
71095 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71096
71097 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71098
71099 diff -urNp linux-3.0.7/net/batman-adv/send.c linux-3.0.7/net/batman-adv/send.c
71100 --- linux-3.0.7/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
71101 +++ linux-3.0.7/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
71102 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
71103
71104 /* change sequence number to network order */
71105 batman_packet->seqno =
71106 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
71107 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71108
71109 if (vis_server == VIS_TYPE_SERVER_SYNC)
71110 batman_packet->flags |= VIS_SERVER;
71111 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
71112 else
71113 batman_packet->gw_flags = 0;
71114
71115 - atomic_inc(&hard_iface->seqno);
71116 + atomic_inc_unchecked(&hard_iface->seqno);
71117
71118 slide_own_bcast_window(hard_iface);
71119 send_time = own_send_time(bat_priv);
71120 diff -urNp linux-3.0.7/net/batman-adv/soft-interface.c linux-3.0.7/net/batman-adv/soft-interface.c
71121 --- linux-3.0.7/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
71122 +++ linux-3.0.7/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
71123 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
71124
71125 /* set broadcast sequence number */
71126 bcast_packet->seqno =
71127 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71128 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71129
71130 add_bcast_packet_to_list(bat_priv, skb);
71131
71132 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
71133 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71134
71135 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71136 - atomic_set(&bat_priv->bcast_seqno, 1);
71137 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71138 atomic_set(&bat_priv->tt_local_changed, 0);
71139
71140 bat_priv->primary_if = NULL;
71141 diff -urNp linux-3.0.7/net/batman-adv/types.h linux-3.0.7/net/batman-adv/types.h
71142 --- linux-3.0.7/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
71143 +++ linux-3.0.7/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
71144 @@ -38,8 +38,8 @@ struct hard_iface {
71145 int16_t if_num;
71146 char if_status;
71147 struct net_device *net_dev;
71148 - atomic_t seqno;
71149 - atomic_t frag_seqno;
71150 + atomic_unchecked_t seqno;
71151 + atomic_unchecked_t frag_seqno;
71152 unsigned char *packet_buff;
71153 int packet_len;
71154 struct kobject *hardif_obj;
71155 @@ -142,7 +142,7 @@ struct bat_priv {
71156 atomic_t orig_interval; /* uint */
71157 atomic_t hop_penalty; /* uint */
71158 atomic_t log_level; /* uint */
71159 - atomic_t bcast_seqno;
71160 + atomic_unchecked_t bcast_seqno;
71161 atomic_t bcast_queue_left;
71162 atomic_t batman_queue_left;
71163 char num_ifaces;
71164 diff -urNp linux-3.0.7/net/batman-adv/unicast.c linux-3.0.7/net/batman-adv/unicast.c
71165 --- linux-3.0.7/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
71166 +++ linux-3.0.7/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
71167 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
71168 frag1->flags = UNI_FRAG_HEAD | large_tail;
71169 frag2->flags = large_tail;
71170
71171 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71172 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71173 frag1->seqno = htons(seqno - 1);
71174 frag2->seqno = htons(seqno);
71175
71176 diff -urNp linux-3.0.7/net/bridge/br_multicast.c linux-3.0.7/net/bridge/br_multicast.c
71177 --- linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:54:54.000000000 -0400
71178 +++ linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:55:28.000000000 -0400
71179 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71180 nexthdr = ip6h->nexthdr;
71181 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71182
71183 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71184 + if (nexthdr != IPPROTO_ICMPV6)
71185 return 0;
71186
71187 /* Okay, we found ICMPv6 header */
71188 diff -urNp linux-3.0.7/net/bridge/netfilter/ebtables.c linux-3.0.7/net/bridge/netfilter/ebtables.c
71189 --- linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
71190 +++ linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
71191 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
71192 tmp.valid_hooks = t->table->valid_hooks;
71193 }
71194 mutex_unlock(&ebt_mutex);
71195 - if (copy_to_user(user, &tmp, *len) != 0){
71196 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71197 BUGPRINT("c2u Didn't work\n");
71198 ret = -EFAULT;
71199 break;
71200 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
71201 int ret;
71202 void __user *pos;
71203
71204 + pax_track_stack();
71205 +
71206 memset(&tinfo, 0, sizeof(tinfo));
71207
71208 if (cmd == EBT_SO_GET_ENTRIES) {
71209 diff -urNp linux-3.0.7/net/caif/caif_socket.c linux-3.0.7/net/caif/caif_socket.c
71210 --- linux-3.0.7/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
71211 +++ linux-3.0.7/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
71212 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71213 #ifdef CONFIG_DEBUG_FS
71214 struct debug_fs_counter {
71215 atomic_t caif_nr_socks;
71216 - atomic_t caif_sock_create;
71217 - atomic_t num_connect_req;
71218 - atomic_t num_connect_resp;
71219 - atomic_t num_connect_fail_resp;
71220 - atomic_t num_disconnect;
71221 - atomic_t num_remote_shutdown_ind;
71222 - atomic_t num_tx_flow_off_ind;
71223 - atomic_t num_tx_flow_on_ind;
71224 - atomic_t num_rx_flow_off;
71225 - atomic_t num_rx_flow_on;
71226 + atomic_unchecked_t caif_sock_create;
71227 + atomic_unchecked_t num_connect_req;
71228 + atomic_unchecked_t num_connect_resp;
71229 + atomic_unchecked_t num_connect_fail_resp;
71230 + atomic_unchecked_t num_disconnect;
71231 + atomic_unchecked_t num_remote_shutdown_ind;
71232 + atomic_unchecked_t num_tx_flow_off_ind;
71233 + atomic_unchecked_t num_tx_flow_on_ind;
71234 + atomic_unchecked_t num_rx_flow_off;
71235 + atomic_unchecked_t num_rx_flow_on;
71236 };
71237 static struct debug_fs_counter cnt;
71238 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71239 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71240 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71241 #else
71242 #define dbfs_atomic_inc(v) 0
71243 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71244 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71245 sk_rcvbuf_lowwater(cf_sk));
71246 set_rx_flow_off(cf_sk);
71247 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
71248 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71249 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71250 }
71251
71252 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71253 set_rx_flow_off(cf_sk);
71254 if (net_ratelimit())
71255 pr_debug("sending flow OFF due to rmem_schedule\n");
71256 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
71257 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71258 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71259 }
71260 skb->dev = NULL;
71261 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71262 switch (flow) {
71263 case CAIF_CTRLCMD_FLOW_ON_IND:
71264 /* OK from modem to start sending again */
71265 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71266 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71267 set_tx_flow_on(cf_sk);
71268 cf_sk->sk.sk_state_change(&cf_sk->sk);
71269 break;
71270
71271 case CAIF_CTRLCMD_FLOW_OFF_IND:
71272 /* Modem asks us to shut up */
71273 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71274 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71275 set_tx_flow_off(cf_sk);
71276 cf_sk->sk.sk_state_change(&cf_sk->sk);
71277 break;
71278 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71279 /* We're now connected */
71280 caif_client_register_refcnt(&cf_sk->layer,
71281 cfsk_hold, cfsk_put);
71282 - dbfs_atomic_inc(&cnt.num_connect_resp);
71283 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71284 cf_sk->sk.sk_state = CAIF_CONNECTED;
71285 set_tx_flow_on(cf_sk);
71286 cf_sk->sk.sk_state_change(&cf_sk->sk);
71287 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71288
71289 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71290 /* Connect request failed */
71291 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71292 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71293 cf_sk->sk.sk_err = ECONNREFUSED;
71294 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71295 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71296 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71297
71298 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71299 /* Modem has closed this connection, or device is down. */
71300 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71301 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71302 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71303 cf_sk->sk.sk_err = ECONNRESET;
71304 set_rx_flow_on(cf_sk);
71305 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71306 return;
71307
71308 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71309 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
71310 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71311 set_rx_flow_on(cf_sk);
71312 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71313 }
71314 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71315 /*ifindex = id of the interface.*/
71316 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71317
71318 - dbfs_atomic_inc(&cnt.num_connect_req);
71319 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71320 cf_sk->layer.receive = caif_sktrecv_cb;
71321
71322 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71323 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71324 spin_unlock_bh(&sk->sk_receive_queue.lock);
71325 sock->sk = NULL;
71326
71327 - dbfs_atomic_inc(&cnt.num_disconnect);
71328 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71329
71330 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71331 if (cf_sk->debugfs_socket_dir != NULL)
71332 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71333 cf_sk->conn_req.protocol = protocol;
71334 /* Increase the number of sockets created. */
71335 dbfs_atomic_inc(&cnt.caif_nr_socks);
71336 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
71337 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71338 #ifdef CONFIG_DEBUG_FS
71339 if (!IS_ERR(debugfsdir)) {
71340
71341 diff -urNp linux-3.0.7/net/caif/cfctrl.c linux-3.0.7/net/caif/cfctrl.c
71342 --- linux-3.0.7/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
71343 +++ linux-3.0.7/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
71344 @@ -9,6 +9,7 @@
71345 #include <linux/stddef.h>
71346 #include <linux/spinlock.h>
71347 #include <linux/slab.h>
71348 +#include <linux/sched.h>
71349 #include <net/caif/caif_layer.h>
71350 #include <net/caif/cfpkt.h>
71351 #include <net/caif/cfctrl.h>
71352 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71353 dev_info.id = 0xff;
71354 memset(this, 0, sizeof(*this));
71355 cfsrvl_init(&this->serv, 0, &dev_info, false);
71356 - atomic_set(&this->req_seq_no, 1);
71357 - atomic_set(&this->rsp_seq_no, 1);
71358 + atomic_set_unchecked(&this->req_seq_no, 1);
71359 + atomic_set_unchecked(&this->rsp_seq_no, 1);
71360 this->serv.layer.receive = cfctrl_recv;
71361 sprintf(this->serv.layer.name, "ctrl");
71362 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71363 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71364 struct cfctrl_request_info *req)
71365 {
71366 spin_lock_bh(&ctrl->info_list_lock);
71367 - atomic_inc(&ctrl->req_seq_no);
71368 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
71369 + atomic_inc_unchecked(&ctrl->req_seq_no);
71370 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71371 list_add_tail(&req->list, &ctrl->list);
71372 spin_unlock_bh(&ctrl->info_list_lock);
71373 }
71374 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71375 if (p != first)
71376 pr_warn("Requests are not received in order\n");
71377
71378 - atomic_set(&ctrl->rsp_seq_no,
71379 + atomic_set_unchecked(&ctrl->rsp_seq_no,
71380 p->sequence_no);
71381 list_del(&p->list);
71382 goto out;
71383 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71384 struct cfctrl *cfctrl = container_obj(layer);
71385 struct cfctrl_request_info rsp, *req;
71386
71387 + pax_track_stack();
71388
71389 cfpkt_extr_head(pkt, &cmdrsp, 1);
71390 cmd = cmdrsp & CFCTRL_CMD_MASK;
71391 diff -urNp linux-3.0.7/net/compat.c linux-3.0.7/net/compat.c
71392 --- linux-3.0.7/net/compat.c 2011-07-21 22:17:23.000000000 -0400
71393 +++ linux-3.0.7/net/compat.c 2011-10-06 04:17:55.000000000 -0400
71394 @@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71395 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71396 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71397 return -EFAULT;
71398 - kmsg->msg_name = compat_ptr(tmp1);
71399 - kmsg->msg_iov = compat_ptr(tmp2);
71400 - kmsg->msg_control = compat_ptr(tmp3);
71401 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71402 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71403 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71404 return 0;
71405 }
71406
71407 @@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71408
71409 if (kern_msg->msg_namelen) {
71410 if (mode == VERIFY_READ) {
71411 - int err = move_addr_to_kernel(kern_msg->msg_name,
71412 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71413 kern_msg->msg_namelen,
71414 kern_address);
71415 if (err < 0)
71416 @@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71417 kern_msg->msg_name = NULL;
71418
71419 tot_len = iov_from_user_compat_to_kern(kern_iov,
71420 - (struct compat_iovec __user *)kern_msg->msg_iov,
71421 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
71422 kern_msg->msg_iovlen);
71423 if (tot_len >= 0)
71424 kern_msg->msg_iov = kern_iov;
71425 @@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71426
71427 #define CMSG_COMPAT_FIRSTHDR(msg) \
71428 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71429 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71430 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71431 (struct compat_cmsghdr __user *)NULL)
71432
71433 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71434 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71435 (ucmlen) <= (unsigned long) \
71436 ((mhdr)->msg_controllen - \
71437 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71438 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71439
71440 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71441 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71442 {
71443 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71444 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71445 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71446 msg->msg_controllen)
71447 return NULL;
71448 return (struct compat_cmsghdr __user *)ptr;
71449 @@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71450 {
71451 struct compat_timeval ctv;
71452 struct compat_timespec cts[3];
71453 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71454 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71455 struct compat_cmsghdr cmhdr;
71456 int cmlen;
71457
71458 @@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71459
71460 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71461 {
71462 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71463 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71464 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71465 int fdnum = scm->fp->count;
71466 struct file **fp = scm->fp->fp;
71467 @@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71468 return -EFAULT;
71469 old_fs = get_fs();
71470 set_fs(KERNEL_DS);
71471 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71472 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71473 set_fs(old_fs);
71474
71475 return err;
71476 @@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71477 len = sizeof(ktime);
71478 old_fs = get_fs();
71479 set_fs(KERNEL_DS);
71480 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71481 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71482 set_fs(old_fs);
71483
71484 if (!err) {
71485 @@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71486 case MCAST_JOIN_GROUP:
71487 case MCAST_LEAVE_GROUP:
71488 {
71489 - struct compat_group_req __user *gr32 = (void *)optval;
71490 + struct compat_group_req __user *gr32 = (void __user *)optval;
71491 struct group_req __user *kgr =
71492 compat_alloc_user_space(sizeof(struct group_req));
71493 u32 interface;
71494 @@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71495 case MCAST_BLOCK_SOURCE:
71496 case MCAST_UNBLOCK_SOURCE:
71497 {
71498 - struct compat_group_source_req __user *gsr32 = (void *)optval;
71499 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71500 struct group_source_req __user *kgsr = compat_alloc_user_space(
71501 sizeof(struct group_source_req));
71502 u32 interface;
71503 @@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71504 }
71505 case MCAST_MSFILTER:
71506 {
71507 - struct compat_group_filter __user *gf32 = (void *)optval;
71508 + struct compat_group_filter __user *gf32 = (void __user *)optval;
71509 struct group_filter __user *kgf;
71510 u32 interface, fmode, numsrc;
71511
71512 @@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71513 char __user *optval, int __user *optlen,
71514 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71515 {
71516 - struct compat_group_filter __user *gf32 = (void *)optval;
71517 + struct compat_group_filter __user *gf32 = (void __user *)optval;
71518 struct group_filter __user *kgf;
71519 int __user *koptlen;
71520 u32 interface, fmode, numsrc;
71521 diff -urNp linux-3.0.7/net/core/datagram.c linux-3.0.7/net/core/datagram.c
71522 --- linux-3.0.7/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
71523 +++ linux-3.0.7/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
71524 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71525 }
71526
71527 kfree_skb(skb);
71528 - atomic_inc(&sk->sk_drops);
71529 + atomic_inc_unchecked(&sk->sk_drops);
71530 sk_mem_reclaim_partial(sk);
71531
71532 return err;
71533 diff -urNp linux-3.0.7/net/core/dev.c linux-3.0.7/net/core/dev.c
71534 --- linux-3.0.7/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
71535 +++ linux-3.0.7/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
71536 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
71537 if (no_module && capable(CAP_NET_ADMIN))
71538 no_module = request_module("netdev-%s", name);
71539 if (no_module && capable(CAP_SYS_MODULE)) {
71540 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71541 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71542 +#else
71543 if (!request_module("%s", name))
71544 pr_err("Loading kernel module for a network device "
71545 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71546 "instead\n", name);
71547 +#endif
71548 }
71549 }
71550 EXPORT_SYMBOL(dev_load);
71551 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
71552
71553 struct dev_gso_cb {
71554 void (*destructor)(struct sk_buff *skb);
71555 -};
71556 +} __no_const;
71557
71558 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71559
71560 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
71561 }
71562 EXPORT_SYMBOL(netif_rx_ni);
71563
71564 -static void net_tx_action(struct softirq_action *h)
71565 +static void net_tx_action(void)
71566 {
71567 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71568
71569 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
71570 }
71571 EXPORT_SYMBOL(netif_napi_del);
71572
71573 -static void net_rx_action(struct softirq_action *h)
71574 +static void net_rx_action(void)
71575 {
71576 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71577 unsigned long time_limit = jiffies + 2;
71578 diff -urNp linux-3.0.7/net/core/flow.c linux-3.0.7/net/core/flow.c
71579 --- linux-3.0.7/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
71580 +++ linux-3.0.7/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
71581 @@ -60,7 +60,7 @@ struct flow_cache {
71582 struct timer_list rnd_timer;
71583 };
71584
71585 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71586 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71587 EXPORT_SYMBOL(flow_cache_genid);
71588 static struct flow_cache flow_cache_global;
71589 static struct kmem_cache *flow_cachep __read_mostly;
71590 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
71591
71592 static int flow_entry_valid(struct flow_cache_entry *fle)
71593 {
71594 - if (atomic_read(&flow_cache_genid) != fle->genid)
71595 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71596 return 0;
71597 if (fle->object && !fle->object->ops->check(fle->object))
71598 return 0;
71599 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
71600 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71601 fcp->hash_count++;
71602 }
71603 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71604 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
71605 flo = fle->object;
71606 if (!flo)
71607 goto ret_object;
71608 @@ -274,7 +274,7 @@ nocache:
71609 }
71610 flo = resolver(net, key, family, dir, flo, ctx);
71611 if (fle) {
71612 - fle->genid = atomic_read(&flow_cache_genid);
71613 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71614 if (!IS_ERR(flo))
71615 fle->object = flo;
71616 else
71617 diff -urNp linux-3.0.7/net/core/iovec.c linux-3.0.7/net/core/iovec.c
71618 --- linux-3.0.7/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
71619 +++ linux-3.0.7/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
71620 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
71621 if (m->msg_namelen) {
71622 if (mode == VERIFY_READ) {
71623 void __user *namep;
71624 - namep = (void __user __force *) m->msg_name;
71625 + namep = (void __force_user *) m->msg_name;
71626 err = move_addr_to_kernel(namep, m->msg_namelen,
71627 address);
71628 if (err < 0)
71629 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
71630 }
71631
71632 size = m->msg_iovlen * sizeof(struct iovec);
71633 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
71634 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
71635 return -EFAULT;
71636
71637 m->msg_iov = iov;
71638 diff -urNp linux-3.0.7/net/core/rtnetlink.c linux-3.0.7/net/core/rtnetlink.c
71639 --- linux-3.0.7/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
71640 +++ linux-3.0.7/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
71641 @@ -56,7 +56,7 @@
71642 struct rtnl_link {
71643 rtnl_doit_func doit;
71644 rtnl_dumpit_func dumpit;
71645 -};
71646 +} __no_const;
71647
71648 static DEFINE_MUTEX(rtnl_mutex);
71649
71650 diff -urNp linux-3.0.7/net/core/scm.c linux-3.0.7/net/core/scm.c
71651 --- linux-3.0.7/net/core/scm.c 2011-10-16 21:54:54.000000000 -0400
71652 +++ linux-3.0.7/net/core/scm.c 2011-10-16 21:55:28.000000000 -0400
71653 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
71654 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
71655 {
71656 struct cmsghdr __user *cm
71657 - = (__force struct cmsghdr __user *)msg->msg_control;
71658 + = (struct cmsghdr __force_user *)msg->msg_control;
71659 struct cmsghdr cmhdr;
71660 int cmlen = CMSG_LEN(len);
71661 int err;
71662 @@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
71663 err = -EFAULT;
71664 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
71665 goto out;
71666 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
71667 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
71668 goto out;
71669 cmlen = CMSG_SPACE(len);
71670 if (msg->msg_controllen < cmlen)
71671 @@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
71672 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
71673 {
71674 struct cmsghdr __user *cm
71675 - = (__force struct cmsghdr __user*)msg->msg_control;
71676 + = (struct cmsghdr __force_user *)msg->msg_control;
71677
71678 int fdmax = 0;
71679 int fdnum = scm->fp->count;
71680 @@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
71681 if (fdnum < fdmax)
71682 fdmax = fdnum;
71683
71684 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
71685 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
71686 i++, cmfptr++)
71687 {
71688 int new_fd;
71689 diff -urNp linux-3.0.7/net/core/skbuff.c linux-3.0.7/net/core/skbuff.c
71690 --- linux-3.0.7/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
71691 +++ linux-3.0.7/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
71692 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
71693 struct sock *sk = skb->sk;
71694 int ret = 0;
71695
71696 + pax_track_stack();
71697 +
71698 if (splice_grow_spd(pipe, &spd))
71699 return -ENOMEM;
71700
71701 diff -urNp linux-3.0.7/net/core/sock.c linux-3.0.7/net/core/sock.c
71702 --- linux-3.0.7/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
71703 +++ linux-3.0.7/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
71704 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71705 */
71706 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
71707 (unsigned)sk->sk_rcvbuf) {
71708 - atomic_inc(&sk->sk_drops);
71709 + atomic_inc_unchecked(&sk->sk_drops);
71710 return -ENOMEM;
71711 }
71712
71713 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71714 return err;
71715
71716 if (!sk_rmem_schedule(sk, skb->truesize)) {
71717 - atomic_inc(&sk->sk_drops);
71718 + atomic_inc_unchecked(&sk->sk_drops);
71719 return -ENOBUFS;
71720 }
71721
71722 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71723 skb_dst_force(skb);
71724
71725 spin_lock_irqsave(&list->lock, flags);
71726 - skb->dropcount = atomic_read(&sk->sk_drops);
71727 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
71728 __skb_queue_tail(list, skb);
71729 spin_unlock_irqrestore(&list->lock, flags);
71730
71731 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
71732 skb->dev = NULL;
71733
71734 if (sk_rcvqueues_full(sk, skb)) {
71735 - atomic_inc(&sk->sk_drops);
71736 + atomic_inc_unchecked(&sk->sk_drops);
71737 goto discard_and_relse;
71738 }
71739 if (nested)
71740 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
71741 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
71742 } else if (sk_add_backlog(sk, skb)) {
71743 bh_unlock_sock(sk);
71744 - atomic_inc(&sk->sk_drops);
71745 + atomic_inc_unchecked(&sk->sk_drops);
71746 goto discard_and_relse;
71747 }
71748
71749 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
71750 if (len > sizeof(peercred))
71751 len = sizeof(peercred);
71752 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
71753 - if (copy_to_user(optval, &peercred, len))
71754 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
71755 return -EFAULT;
71756 goto lenout;
71757 }
71758 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
71759 return -ENOTCONN;
71760 if (lv < len)
71761 return -EINVAL;
71762 - if (copy_to_user(optval, address, len))
71763 + if (len > sizeof(address) || copy_to_user(optval, address, len))
71764 return -EFAULT;
71765 goto lenout;
71766 }
71767 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
71768
71769 if (len > lv)
71770 len = lv;
71771 - if (copy_to_user(optval, &v, len))
71772 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
71773 return -EFAULT;
71774 lenout:
71775 if (put_user(len, optlen))
71776 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
71777 */
71778 smp_wmb();
71779 atomic_set(&sk->sk_refcnt, 1);
71780 - atomic_set(&sk->sk_drops, 0);
71781 + atomic_set_unchecked(&sk->sk_drops, 0);
71782 }
71783 EXPORT_SYMBOL(sock_init_data);
71784
71785 diff -urNp linux-3.0.7/net/decnet/sysctl_net_decnet.c linux-3.0.7/net/decnet/sysctl_net_decnet.c
71786 --- linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
71787 +++ linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
71788 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
71789
71790 if (len > *lenp) len = *lenp;
71791
71792 - if (copy_to_user(buffer, addr, len))
71793 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71794 return -EFAULT;
71795
71796 *lenp = len;
71797 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
71798
71799 if (len > *lenp) len = *lenp;
71800
71801 - if (copy_to_user(buffer, devname, len))
71802 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71803 return -EFAULT;
71804
71805 *lenp = len;
71806 diff -urNp linux-3.0.7/net/econet/Kconfig linux-3.0.7/net/econet/Kconfig
71807 --- linux-3.0.7/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
71808 +++ linux-3.0.7/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
71809 @@ -4,7 +4,7 @@
71810
71811 config ECONET
71812 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71813 - depends on EXPERIMENTAL && INET
71814 + depends on EXPERIMENTAL && INET && BROKEN
71815 ---help---
71816 Econet is a fairly old and slow networking protocol mainly used by
71817 Acorn computers to access file and print servers. It uses native
71818 diff -urNp linux-3.0.7/net/ipv4/fib_frontend.c linux-3.0.7/net/ipv4/fib_frontend.c
71819 --- linux-3.0.7/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
71820 +++ linux-3.0.7/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
71821 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
71822 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71823 fib_sync_up(dev);
71824 #endif
71825 - atomic_inc(&net->ipv4.dev_addr_genid);
71826 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71827 rt_cache_flush(dev_net(dev), -1);
71828 break;
71829 case NETDEV_DOWN:
71830 fib_del_ifaddr(ifa, NULL);
71831 - atomic_inc(&net->ipv4.dev_addr_genid);
71832 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71833 if (ifa->ifa_dev->ifa_list == NULL) {
71834 /* Last address was deleted from this interface.
71835 * Disable IP.
71836 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
71837 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71838 fib_sync_up(dev);
71839 #endif
71840 - atomic_inc(&net->ipv4.dev_addr_genid);
71841 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71842 rt_cache_flush(dev_net(dev), -1);
71843 break;
71844 case NETDEV_DOWN:
71845 diff -urNp linux-3.0.7/net/ipv4/fib_semantics.c linux-3.0.7/net/ipv4/fib_semantics.c
71846 --- linux-3.0.7/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
71847 +++ linux-3.0.7/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
71848 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
71849 nh->nh_saddr = inet_select_addr(nh->nh_dev,
71850 nh->nh_gw,
71851 nh->nh_parent->fib_scope);
71852 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
71853 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
71854
71855 return nh->nh_saddr;
71856 }
71857 diff -urNp linux-3.0.7/net/ipv4/inet_diag.c linux-3.0.7/net/ipv4/inet_diag.c
71858 --- linux-3.0.7/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
71859 +++ linux-3.0.7/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
71860 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
71861 r->idiag_retrans = 0;
71862
71863 r->id.idiag_if = sk->sk_bound_dev_if;
71864 +
71865 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71866 + r->id.idiag_cookie[0] = 0;
71867 + r->id.idiag_cookie[1] = 0;
71868 +#else
71869 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71870 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71871 +#endif
71872
71873 r->id.idiag_sport = inet->inet_sport;
71874 r->id.idiag_dport = inet->inet_dport;
71875 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
71876 r->idiag_family = tw->tw_family;
71877 r->idiag_retrans = 0;
71878 r->id.idiag_if = tw->tw_bound_dev_if;
71879 +
71880 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71881 + r->id.idiag_cookie[0] = 0;
71882 + r->id.idiag_cookie[1] = 0;
71883 +#else
71884 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71885 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71886 +#endif
71887 +
71888 r->id.idiag_sport = tw->tw_sport;
71889 r->id.idiag_dport = tw->tw_dport;
71890 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71891 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
71892 if (sk == NULL)
71893 goto unlock;
71894
71895 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71896 err = -ESTALE;
71897 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71898 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71899 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71900 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71901 goto out;
71902 +#endif
71903
71904 err = -ENOMEM;
71905 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71906 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
71907 r->idiag_retrans = req->retrans;
71908
71909 r->id.idiag_if = sk->sk_bound_dev_if;
71910 +
71911 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71912 + r->id.idiag_cookie[0] = 0;
71913 + r->id.idiag_cookie[1] = 0;
71914 +#else
71915 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71916 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71917 +#endif
71918
71919 tmo = req->expires - jiffies;
71920 if (tmo < 0)
71921 diff -urNp linux-3.0.7/net/ipv4/inet_hashtables.c linux-3.0.7/net/ipv4/inet_hashtables.c
71922 --- linux-3.0.7/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
71923 +++ linux-3.0.7/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
71924 @@ -18,12 +18,15 @@
71925 #include <linux/sched.h>
71926 #include <linux/slab.h>
71927 #include <linux/wait.h>
71928 +#include <linux/security.h>
71929
71930 #include <net/inet_connection_sock.h>
71931 #include <net/inet_hashtables.h>
71932 #include <net/secure_seq.h>
71933 #include <net/ip.h>
71934
71935 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71936 +
71937 /*
71938 * Allocate and initialize a new local port bind bucket.
71939 * The bindhash mutex for snum's hash chain must be held here.
71940 @@ -530,6 +533,8 @@ ok:
71941 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
71942 spin_unlock(&head->lock);
71943
71944 + gr_update_task_in_ip_table(current, inet_sk(sk));
71945 +
71946 if (tw) {
71947 inet_twsk_deschedule(tw, death_row);
71948 while (twrefcnt) {
71949 diff -urNp linux-3.0.7/net/ipv4/inetpeer.c linux-3.0.7/net/ipv4/inetpeer.c
71950 --- linux-3.0.7/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
71951 +++ linux-3.0.7/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
71952 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
71953 unsigned int sequence;
71954 int invalidated, newrefcnt = 0;
71955
71956 + pax_track_stack();
71957 +
71958 /* Look up for the address quickly, lockless.
71959 * Because of a concurrent writer, we might not find an existing entry.
71960 */
71961 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
71962 if (p) {
71963 p->daddr = *daddr;
71964 atomic_set(&p->refcnt, 1);
71965 - atomic_set(&p->rid, 0);
71966 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
71967 + atomic_set_unchecked(&p->rid, 0);
71968 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
71969 p->tcp_ts_stamp = 0;
71970 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
71971 p->rate_tokens = 0;
71972 diff -urNp linux-3.0.7/net/ipv4/ipconfig.c linux-3.0.7/net/ipv4/ipconfig.c
71973 --- linux-3.0.7/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
71974 +++ linux-3.0.7/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
71975 @@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
71976
71977 mm_segment_t oldfs = get_fs();
71978 set_fs(get_ds());
71979 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
71980 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
71981 set_fs(oldfs);
71982 return res;
71983 }
71984 @@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
71985
71986 mm_segment_t oldfs = get_fs();
71987 set_fs(get_ds());
71988 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
71989 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
71990 set_fs(oldfs);
71991 return res;
71992 }
71993 @@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
71994
71995 mm_segment_t oldfs = get_fs();
71996 set_fs(get_ds());
71997 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
71998 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
71999 set_fs(oldfs);
72000 return res;
72001 }
72002 diff -urNp linux-3.0.7/net/ipv4/ip_fragment.c linux-3.0.7/net/ipv4/ip_fragment.c
72003 --- linux-3.0.7/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
72004 +++ linux-3.0.7/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
72005 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
72006 return 0;
72007
72008 start = qp->rid;
72009 - end = atomic_inc_return(&peer->rid);
72010 + end = atomic_inc_return_unchecked(&peer->rid);
72011 qp->rid = end;
72012
72013 rc = qp->q.fragments && (end - start) > max;
72014 diff -urNp linux-3.0.7/net/ipv4/ip_sockglue.c linux-3.0.7/net/ipv4/ip_sockglue.c
72015 --- linux-3.0.7/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
72016 +++ linux-3.0.7/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
72017 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72018 int val;
72019 int len;
72020
72021 + pax_track_stack();
72022 +
72023 if (level != SOL_IP)
72024 return -EOPNOTSUPP;
72025
72026 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72027 len = min_t(unsigned int, len, opt->optlen);
72028 if (put_user(len, optlen))
72029 return -EFAULT;
72030 - if (copy_to_user(optval, opt->__data, len))
72031 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72032 + copy_to_user(optval, opt->__data, len))
72033 return -EFAULT;
72034 return 0;
72035 }
72036 @@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72037 if (sk->sk_type != SOCK_STREAM)
72038 return -ENOPROTOOPT;
72039
72040 - msg.msg_control = optval;
72041 + msg.msg_control = (void __force_kernel *)optval;
72042 msg.msg_controllen = len;
72043 msg.msg_flags = 0;
72044
72045 diff -urNp linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c
72046 --- linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
72047 +++ linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
72048 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72049
72050 *len = 0;
72051
72052 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72053 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72054 if (*octets == NULL) {
72055 if (net_ratelimit())
72056 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72057 diff -urNp linux-3.0.7/net/ipv4/ping.c linux-3.0.7/net/ipv4/ping.c
72058 --- linux-3.0.7/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
72059 +++ linux-3.0.7/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
72060 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72061 sk_rmem_alloc_get(sp),
72062 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72063 atomic_read(&sp->sk_refcnt), sp,
72064 - atomic_read(&sp->sk_drops), len);
72065 + atomic_read_unchecked(&sp->sk_drops), len);
72066 }
72067
72068 static int ping_seq_show(struct seq_file *seq, void *v)
72069 diff -urNp linux-3.0.7/net/ipv4/raw.c linux-3.0.7/net/ipv4/raw.c
72070 --- linux-3.0.7/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
72071 +++ linux-3.0.7/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
72072 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72073 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72074 {
72075 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72076 - atomic_inc(&sk->sk_drops);
72077 + atomic_inc_unchecked(&sk->sk_drops);
72078 kfree_skb(skb);
72079 return NET_RX_DROP;
72080 }
72081 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
72082
72083 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72084 {
72085 + struct icmp_filter filter;
72086 +
72087 if (optlen > sizeof(struct icmp_filter))
72088 optlen = sizeof(struct icmp_filter);
72089 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72090 + if (copy_from_user(&filter, optval, optlen))
72091 return -EFAULT;
72092 + raw_sk(sk)->filter = filter;
72093 return 0;
72094 }
72095
72096 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72097 {
72098 int len, ret = -EFAULT;
72099 + struct icmp_filter filter;
72100
72101 if (get_user(len, optlen))
72102 goto out;
72103 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
72104 if (len > sizeof(struct icmp_filter))
72105 len = sizeof(struct icmp_filter);
72106 ret = -EFAULT;
72107 - if (put_user(len, optlen) ||
72108 - copy_to_user(optval, &raw_sk(sk)->filter, len))
72109 + filter = raw_sk(sk)->filter;
72110 + if (put_user(len, optlen) || len > sizeof filter ||
72111 + copy_to_user(optval, &filter, len))
72112 goto out;
72113 ret = 0;
72114 out: return ret;
72115 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72116 sk_wmem_alloc_get(sp),
72117 sk_rmem_alloc_get(sp),
72118 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72119 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72120 + atomic_read(&sp->sk_refcnt),
72121 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72122 + NULL,
72123 +#else
72124 + sp,
72125 +#endif
72126 + atomic_read_unchecked(&sp->sk_drops));
72127 }
72128
72129 static int raw_seq_show(struct seq_file *seq, void *v)
72130 diff -urNp linux-3.0.7/net/ipv4/route.c linux-3.0.7/net/ipv4/route.c
72131 --- linux-3.0.7/net/ipv4/route.c 2011-10-16 21:54:54.000000000 -0400
72132 +++ linux-3.0.7/net/ipv4/route.c 2011-10-16 21:55:28.000000000 -0400
72133 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
72134
72135 static inline int rt_genid(struct net *net)
72136 {
72137 - return atomic_read(&net->ipv4.rt_genid);
72138 + return atomic_read_unchecked(&net->ipv4.rt_genid);
72139 }
72140
72141 #ifdef CONFIG_PROC_FS
72142 @@ -832,7 +832,7 @@ static void rt_cache_invalidate(struct n
72143 unsigned char shuffle;
72144
72145 get_random_bytes(&shuffle, sizeof(shuffle));
72146 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72147 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72148 }
72149
72150 /*
72151 @@ -2832,7 +2832,7 @@ static int rt_fill_info(struct net *net,
72152 error = rt->dst.error;
72153 if (peer) {
72154 inet_peer_refcheck(rt->peer);
72155 - id = atomic_read(&peer->ip_id_count) & 0xffff;
72156 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72157 if (peer->tcp_ts_stamp) {
72158 ts = peer->tcp_ts;
72159 tsage = get_seconds() - peer->tcp_ts_stamp;
72160 diff -urNp linux-3.0.7/net/ipv4/tcp.c linux-3.0.7/net/ipv4/tcp.c
72161 --- linux-3.0.7/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
72162 +++ linux-3.0.7/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
72163 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72164 int val;
72165 int err = 0;
72166
72167 + pax_track_stack();
72168 +
72169 /* These are data/string values, all the others are ints */
72170 switch (optname) {
72171 case TCP_CONGESTION: {
72172 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72173 struct tcp_sock *tp = tcp_sk(sk);
72174 int val, len;
72175
72176 + pax_track_stack();
72177 +
72178 if (get_user(len, optlen))
72179 return -EFAULT;
72180
72181 diff -urNp linux-3.0.7/net/ipv4/tcp_ipv4.c linux-3.0.7/net/ipv4/tcp_ipv4.c
72182 --- linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
72183 +++ linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
72184 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72185 int sysctl_tcp_low_latency __read_mostly;
72186 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72187
72188 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72189 +extern int grsec_enable_blackhole;
72190 +#endif
72191
72192 #ifdef CONFIG_TCP_MD5SIG
72193 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72194 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72195 return 0;
72196
72197 reset:
72198 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72199 + if (!grsec_enable_blackhole)
72200 +#endif
72201 tcp_v4_send_reset(rsk, skb);
72202 discard:
72203 kfree_skb(skb);
72204 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72205 TCP_SKB_CB(skb)->sacked = 0;
72206
72207 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72208 - if (!sk)
72209 + if (!sk) {
72210 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72211 + ret = 1;
72212 +#endif
72213 goto no_tcp_socket;
72214 -
72215 + }
72216 process:
72217 - if (sk->sk_state == TCP_TIME_WAIT)
72218 + if (sk->sk_state == TCP_TIME_WAIT) {
72219 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72220 + ret = 2;
72221 +#endif
72222 goto do_time_wait;
72223 + }
72224
72225 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72226 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72227 @@ -1724,6 +1737,10 @@ no_tcp_socket:
72228 bad_packet:
72229 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72230 } else {
72231 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72232 + if (!grsec_enable_blackhole || (ret == 1 &&
72233 + (skb->dev->flags & IFF_LOOPBACK)))
72234 +#endif
72235 tcp_v4_send_reset(NULL, skb);
72236 }
72237
72238 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
72239 0, /* non standard timer */
72240 0, /* open_requests have no inode */
72241 atomic_read(&sk->sk_refcnt),
72242 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72243 + NULL,
72244 +#else
72245 req,
72246 +#endif
72247 len);
72248 }
72249
72250 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
72251 sock_i_uid(sk),
72252 icsk->icsk_probes_out,
72253 sock_i_ino(sk),
72254 - atomic_read(&sk->sk_refcnt), sk,
72255 + atomic_read(&sk->sk_refcnt),
72256 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72257 + NULL,
72258 +#else
72259 + sk,
72260 +#endif
72261 jiffies_to_clock_t(icsk->icsk_rto),
72262 jiffies_to_clock_t(icsk->icsk_ack.ato),
72263 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72264 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
72265 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72266 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72267 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72268 - atomic_read(&tw->tw_refcnt), tw, len);
72269 + atomic_read(&tw->tw_refcnt),
72270 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72271 + NULL,
72272 +#else
72273 + tw,
72274 +#endif
72275 + len);
72276 }
72277
72278 #define TMPSZ 150
72279 diff -urNp linux-3.0.7/net/ipv4/tcp_minisocks.c linux-3.0.7/net/ipv4/tcp_minisocks.c
72280 --- linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
72281 +++ linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
72282 @@ -27,6 +27,10 @@
72283 #include <net/inet_common.h>
72284 #include <net/xfrm.h>
72285
72286 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72287 +extern int grsec_enable_blackhole;
72288 +#endif
72289 +
72290 int sysctl_tcp_syncookies __read_mostly = 1;
72291 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72292
72293 @@ -745,6 +749,10 @@ listen_overflow:
72294
72295 embryonic_reset:
72296 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72297 +
72298 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72299 + if (!grsec_enable_blackhole)
72300 +#endif
72301 if (!(flg & TCP_FLAG_RST))
72302 req->rsk_ops->send_reset(sk, skb);
72303
72304 diff -urNp linux-3.0.7/net/ipv4/tcp_output.c linux-3.0.7/net/ipv4/tcp_output.c
72305 --- linux-3.0.7/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
72306 +++ linux-3.0.7/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
72307 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72308 int mss;
72309 int s_data_desired = 0;
72310
72311 + pax_track_stack();
72312 +
72313 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72314 s_data_desired = cvp->s_data_desired;
72315 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72316 diff -urNp linux-3.0.7/net/ipv4/tcp_probe.c linux-3.0.7/net/ipv4/tcp_probe.c
72317 --- linux-3.0.7/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
72318 +++ linux-3.0.7/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
72319 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72320 if (cnt + width >= len)
72321 break;
72322
72323 - if (copy_to_user(buf + cnt, tbuf, width))
72324 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72325 return -EFAULT;
72326 cnt += width;
72327 }
72328 diff -urNp linux-3.0.7/net/ipv4/tcp_timer.c linux-3.0.7/net/ipv4/tcp_timer.c
72329 --- linux-3.0.7/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
72330 +++ linux-3.0.7/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
72331 @@ -22,6 +22,10 @@
72332 #include <linux/gfp.h>
72333 #include <net/tcp.h>
72334
72335 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72336 +extern int grsec_lastack_retries;
72337 +#endif
72338 +
72339 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72340 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72341 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72342 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72343 }
72344 }
72345
72346 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72347 + if ((sk->sk_state == TCP_LAST_ACK) &&
72348 + (grsec_lastack_retries > 0) &&
72349 + (grsec_lastack_retries < retry_until))
72350 + retry_until = grsec_lastack_retries;
72351 +#endif
72352 +
72353 if (retransmits_timed_out(sk, retry_until,
72354 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72355 /* Has it gone just too far? */
72356 diff -urNp linux-3.0.7/net/ipv4/udp.c linux-3.0.7/net/ipv4/udp.c
72357 --- linux-3.0.7/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
72358 +++ linux-3.0.7/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
72359 @@ -86,6 +86,7 @@
72360 #include <linux/types.h>
72361 #include <linux/fcntl.h>
72362 #include <linux/module.h>
72363 +#include <linux/security.h>
72364 #include <linux/socket.h>
72365 #include <linux/sockios.h>
72366 #include <linux/igmp.h>
72367 @@ -107,6 +108,10 @@
72368 #include <net/xfrm.h>
72369 #include "udp_impl.h"
72370
72371 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72372 +extern int grsec_enable_blackhole;
72373 +#endif
72374 +
72375 struct udp_table udp_table __read_mostly;
72376 EXPORT_SYMBOL(udp_table);
72377
72378 @@ -564,6 +569,9 @@ found:
72379 return s;
72380 }
72381
72382 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72383 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72384 +
72385 /*
72386 * This routine is called by the ICMP module when it gets some
72387 * sort of error condition. If err < 0 then the socket should
72388 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72389 dport = usin->sin_port;
72390 if (dport == 0)
72391 return -EINVAL;
72392 +
72393 + err = gr_search_udp_sendmsg(sk, usin);
72394 + if (err)
72395 + return err;
72396 } else {
72397 if (sk->sk_state != TCP_ESTABLISHED)
72398 return -EDESTADDRREQ;
72399 +
72400 + err = gr_search_udp_sendmsg(sk, NULL);
72401 + if (err)
72402 + return err;
72403 +
72404 daddr = inet->inet_daddr;
72405 dport = inet->inet_dport;
72406 /* Open fast path for connected socket.
72407 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
72408 udp_lib_checksum_complete(skb)) {
72409 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72410 IS_UDPLITE(sk));
72411 - atomic_inc(&sk->sk_drops);
72412 + atomic_inc_unchecked(&sk->sk_drops);
72413 __skb_unlink(skb, rcvq);
72414 __skb_queue_tail(&list_kill, skb);
72415 }
72416 @@ -1184,6 +1201,10 @@ try_again:
72417 if (!skb)
72418 goto out;
72419
72420 + err = gr_search_udp_recvmsg(sk, skb);
72421 + if (err)
72422 + goto out_free;
72423 +
72424 ulen = skb->len - sizeof(struct udphdr);
72425 if (len > ulen)
72426 len = ulen;
72427 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72428
72429 drop:
72430 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72431 - atomic_inc(&sk->sk_drops);
72432 + atomic_inc_unchecked(&sk->sk_drops);
72433 kfree_skb(skb);
72434 return -1;
72435 }
72436 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
72437 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72438
72439 if (!skb1) {
72440 - atomic_inc(&sk->sk_drops);
72441 + atomic_inc_unchecked(&sk->sk_drops);
72442 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72443 IS_UDPLITE(sk));
72444 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72445 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72446 goto csum_error;
72447
72448 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72449 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72450 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72451 +#endif
72452 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72453
72454 /*
72455 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
72456 sk_wmem_alloc_get(sp),
72457 sk_rmem_alloc_get(sp),
72458 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72459 - atomic_read(&sp->sk_refcnt), sp,
72460 - atomic_read(&sp->sk_drops), len);
72461 + atomic_read(&sp->sk_refcnt),
72462 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72463 + NULL,
72464 +#else
72465 + sp,
72466 +#endif
72467 + atomic_read_unchecked(&sp->sk_drops), len);
72468 }
72469
72470 int udp4_seq_show(struct seq_file *seq, void *v)
72471 diff -urNp linux-3.0.7/net/ipv6/addrconf.c linux-3.0.7/net/ipv6/addrconf.c
72472 --- linux-3.0.7/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
72473 +++ linux-3.0.7/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
72474 @@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
72475 p.iph.ihl = 5;
72476 p.iph.protocol = IPPROTO_IPV6;
72477 p.iph.ttl = 64;
72478 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72479 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72480
72481 if (ops->ndo_do_ioctl) {
72482 mm_segment_t oldfs = get_fs();
72483 diff -urNp linux-3.0.7/net/ipv6/inet6_connection_sock.c linux-3.0.7/net/ipv6/inet6_connection_sock.c
72484 --- linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
72485 +++ linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
72486 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72487 #ifdef CONFIG_XFRM
72488 {
72489 struct rt6_info *rt = (struct rt6_info *)dst;
72490 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72491 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72492 }
72493 #endif
72494 }
72495 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72496 #ifdef CONFIG_XFRM
72497 if (dst) {
72498 struct rt6_info *rt = (struct rt6_info *)dst;
72499 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72500 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72501 __sk_dst_reset(sk);
72502 dst = NULL;
72503 }
72504 diff -urNp linux-3.0.7/net/ipv6/ipv6_sockglue.c linux-3.0.7/net/ipv6/ipv6_sockglue.c
72505 --- linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:54:54.000000000 -0400
72506 +++ linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:55:28.000000000 -0400
72507 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72508 int val, valbool;
72509 int retv = -ENOPROTOOPT;
72510
72511 + pax_track_stack();
72512 +
72513 if (optval == NULL)
72514 val=0;
72515 else {
72516 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72517 int len;
72518 int val;
72519
72520 + pax_track_stack();
72521 +
72522 if (ip6_mroute_opt(optname))
72523 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72524
72525 @@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72526 if (sk->sk_type != SOCK_STREAM)
72527 return -ENOPROTOOPT;
72528
72529 - msg.msg_control = optval;
72530 + msg.msg_control = (void __force_kernel *)optval;
72531 msg.msg_controllen = len;
72532 msg.msg_flags = flags;
72533
72534 diff -urNp linux-3.0.7/net/ipv6/raw.c linux-3.0.7/net/ipv6/raw.c
72535 --- linux-3.0.7/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
72536 +++ linux-3.0.7/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
72537 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72538 {
72539 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72540 skb_checksum_complete(skb)) {
72541 - atomic_inc(&sk->sk_drops);
72542 + atomic_inc_unchecked(&sk->sk_drops);
72543 kfree_skb(skb);
72544 return NET_RX_DROP;
72545 }
72546 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72547 struct raw6_sock *rp = raw6_sk(sk);
72548
72549 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72550 - atomic_inc(&sk->sk_drops);
72551 + atomic_inc_unchecked(&sk->sk_drops);
72552 kfree_skb(skb);
72553 return NET_RX_DROP;
72554 }
72555 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72556
72557 if (inet->hdrincl) {
72558 if (skb_checksum_complete(skb)) {
72559 - atomic_inc(&sk->sk_drops);
72560 + atomic_inc_unchecked(&sk->sk_drops);
72561 kfree_skb(skb);
72562 return NET_RX_DROP;
72563 }
72564 @@ -601,7 +601,7 @@ out:
72565 return err;
72566 }
72567
72568 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72569 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72570 struct flowi6 *fl6, struct dst_entry **dstp,
72571 unsigned int flags)
72572 {
72573 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
72574 u16 proto;
72575 int err;
72576
72577 + pax_track_stack();
72578 +
72579 /* Rough check on arithmetic overflow,
72580 better check is made in ip6_append_data().
72581 */
72582 @@ -909,12 +911,15 @@ do_confirm:
72583 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72584 char __user *optval, int optlen)
72585 {
72586 + struct icmp6_filter filter;
72587 +
72588 switch (optname) {
72589 case ICMPV6_FILTER:
72590 if (optlen > sizeof(struct icmp6_filter))
72591 optlen = sizeof(struct icmp6_filter);
72592 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72593 + if (copy_from_user(&filter, optval, optlen))
72594 return -EFAULT;
72595 + raw6_sk(sk)->filter = filter;
72596 return 0;
72597 default:
72598 return -ENOPROTOOPT;
72599 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
72600 char __user *optval, int __user *optlen)
72601 {
72602 int len;
72603 + struct icmp6_filter filter;
72604
72605 switch (optname) {
72606 case ICMPV6_FILTER:
72607 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
72608 len = sizeof(struct icmp6_filter);
72609 if (put_user(len, optlen))
72610 return -EFAULT;
72611 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72612 + filter = raw6_sk(sk)->filter;
72613 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72614 return -EFAULT;
72615 return 0;
72616 default:
72617 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
72618 0, 0L, 0,
72619 sock_i_uid(sp), 0,
72620 sock_i_ino(sp),
72621 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72622 + atomic_read(&sp->sk_refcnt),
72623 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72624 + NULL,
72625 +#else
72626 + sp,
72627 +#endif
72628 + atomic_read_unchecked(&sp->sk_drops));
72629 }
72630
72631 static int raw6_seq_show(struct seq_file *seq, void *v)
72632 diff -urNp linux-3.0.7/net/ipv6/tcp_ipv6.c linux-3.0.7/net/ipv6/tcp_ipv6.c
72633 --- linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
72634 +++ linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
72635 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72636 }
72637 #endif
72638
72639 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72640 +extern int grsec_enable_blackhole;
72641 +#endif
72642 +
72643 static void tcp_v6_hash(struct sock *sk)
72644 {
72645 if (sk->sk_state != TCP_CLOSE) {
72646 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72647 return 0;
72648
72649 reset:
72650 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72651 + if (!grsec_enable_blackhole)
72652 +#endif
72653 tcp_v6_send_reset(sk, skb);
72654 discard:
72655 if (opt_skb)
72656 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72657 TCP_SKB_CB(skb)->sacked = 0;
72658
72659 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72660 - if (!sk)
72661 + if (!sk) {
72662 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72663 + ret = 1;
72664 +#endif
72665 goto no_tcp_socket;
72666 + }
72667
72668 process:
72669 - if (sk->sk_state == TCP_TIME_WAIT)
72670 + if (sk->sk_state == TCP_TIME_WAIT) {
72671 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72672 + ret = 2;
72673 +#endif
72674 goto do_time_wait;
72675 + }
72676
72677 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
72678 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72679 @@ -1794,6 +1809,10 @@ no_tcp_socket:
72680 bad_packet:
72681 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72682 } else {
72683 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72684 + if (!grsec_enable_blackhole || (ret == 1 &&
72685 + (skb->dev->flags & IFF_LOOPBACK)))
72686 +#endif
72687 tcp_v6_send_reset(NULL, skb);
72688 }
72689
72690 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
72691 uid,
72692 0, /* non standard timer */
72693 0, /* open_requests have no inode */
72694 - 0, req);
72695 + 0,
72696 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72697 + NULL
72698 +#else
72699 + req
72700 +#endif
72701 + );
72702 }
72703
72704 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72705 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
72706 sock_i_uid(sp),
72707 icsk->icsk_probes_out,
72708 sock_i_ino(sp),
72709 - atomic_read(&sp->sk_refcnt), sp,
72710 + atomic_read(&sp->sk_refcnt),
72711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72712 + NULL,
72713 +#else
72714 + sp,
72715 +#endif
72716 jiffies_to_clock_t(icsk->icsk_rto),
72717 jiffies_to_clock_t(icsk->icsk_ack.ato),
72718 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72719 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
72720 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72721 tw->tw_substate, 0, 0,
72722 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72723 - atomic_read(&tw->tw_refcnt), tw);
72724 + atomic_read(&tw->tw_refcnt),
72725 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72726 + NULL
72727 +#else
72728 + tw
72729 +#endif
72730 + );
72731 }
72732
72733 static int tcp6_seq_show(struct seq_file *seq, void *v)
72734 diff -urNp linux-3.0.7/net/ipv6/udp.c linux-3.0.7/net/ipv6/udp.c
72735 --- linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:09.000000000 -0400
72736 +++ linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:19.000000000 -0400
72737 @@ -50,6 +50,10 @@
72738 #include <linux/seq_file.h>
72739 #include "udp_impl.h"
72740
72741 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72742 +extern int grsec_enable_blackhole;
72743 +#endif
72744 +
72745 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72746 {
72747 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72748 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72749
72750 return 0;
72751 drop:
72752 - atomic_inc(&sk->sk_drops);
72753 + atomic_inc_unchecked(&sk->sk_drops);
72754 drop_no_sk_drops_inc:
72755 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72756 kfree_skb(skb);
72757 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
72758 continue;
72759 }
72760 drop:
72761 - atomic_inc(&sk->sk_drops);
72762 + atomic_inc_unchecked(&sk->sk_drops);
72763 UDP6_INC_STATS_BH(sock_net(sk),
72764 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
72765 UDP6_INC_STATS_BH(sock_net(sk),
72766 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72767 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72768 proto == IPPROTO_UDPLITE);
72769
72770 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72771 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72772 +#endif
72773 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
72774
72775 kfree_skb(skb);
72776 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72777 if (!sock_owned_by_user(sk))
72778 udpv6_queue_rcv_skb(sk, skb);
72779 else if (sk_add_backlog(sk, skb)) {
72780 - atomic_inc(&sk->sk_drops);
72781 + atomic_inc_unchecked(&sk->sk_drops);
72782 bh_unlock_sock(sk);
72783 sock_put(sk);
72784 goto discard;
72785 @@ -1408,8 +1415,13 @@ static void udp6_sock_seq_show(struct se
72786 0, 0L, 0,
72787 sock_i_uid(sp), 0,
72788 sock_i_ino(sp),
72789 - atomic_read(&sp->sk_refcnt), sp,
72790 - atomic_read(&sp->sk_drops));
72791 + atomic_read(&sp->sk_refcnt),
72792 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72793 + NULL,
72794 +#else
72795 + sp,
72796 +#endif
72797 + atomic_read_unchecked(&sp->sk_drops));
72798 }
72799
72800 int udp6_seq_show(struct seq_file *seq, void *v)
72801 diff -urNp linux-3.0.7/net/irda/ircomm/ircomm_tty.c linux-3.0.7/net/irda/ircomm/ircomm_tty.c
72802 --- linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
72803 +++ linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
72804 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
72805 add_wait_queue(&self->open_wait, &wait);
72806
72807 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72808 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72809 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72810
72811 /* As far as I can see, we protect open_count - Jean II */
72812 spin_lock_irqsave(&self->spinlock, flags);
72813 if (!tty_hung_up_p(filp)) {
72814 extra_count = 1;
72815 - self->open_count--;
72816 + local_dec(&self->open_count);
72817 }
72818 spin_unlock_irqrestore(&self->spinlock, flags);
72819 - self->blocked_open++;
72820 + local_inc(&self->blocked_open);
72821
72822 while (1) {
72823 if (tty->termios->c_cflag & CBAUD) {
72824 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
72825 }
72826
72827 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72828 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72829 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72830
72831 schedule();
72832 }
72833 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
72834 if (extra_count) {
72835 /* ++ is not atomic, so this should be protected - Jean II */
72836 spin_lock_irqsave(&self->spinlock, flags);
72837 - self->open_count++;
72838 + local_inc(&self->open_count);
72839 spin_unlock_irqrestore(&self->spinlock, flags);
72840 }
72841 - self->blocked_open--;
72842 + local_dec(&self->blocked_open);
72843
72844 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72845 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72846 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72847
72848 if (!retval)
72849 self->flags |= ASYNC_NORMAL_ACTIVE;
72850 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
72851 }
72852 /* ++ is not atomic, so this should be protected - Jean II */
72853 spin_lock_irqsave(&self->spinlock, flags);
72854 - self->open_count++;
72855 + local_inc(&self->open_count);
72856
72857 tty->driver_data = self;
72858 self->tty = tty;
72859 spin_unlock_irqrestore(&self->spinlock, flags);
72860
72861 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72862 - self->line, self->open_count);
72863 + self->line, local_read(&self->open_count));
72864
72865 /* Not really used by us, but lets do it anyway */
72866 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72867 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
72868 return;
72869 }
72870
72871 - if ((tty->count == 1) && (self->open_count != 1)) {
72872 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72873 /*
72874 * Uh, oh. tty->count is 1, which means that the tty
72875 * structure will be freed. state->count should always
72876 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
72877 */
72878 IRDA_DEBUG(0, "%s(), bad serial port count; "
72879 "tty->count is 1, state->count is %d\n", __func__ ,
72880 - self->open_count);
72881 - self->open_count = 1;
72882 + local_read(&self->open_count));
72883 + local_set(&self->open_count, 1);
72884 }
72885
72886 - if (--self->open_count < 0) {
72887 + if (local_dec_return(&self->open_count) < 0) {
72888 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72889 - __func__, self->line, self->open_count);
72890 - self->open_count = 0;
72891 + __func__, self->line, local_read(&self->open_count));
72892 + local_set(&self->open_count, 0);
72893 }
72894 - if (self->open_count) {
72895 + if (local_read(&self->open_count)) {
72896 spin_unlock_irqrestore(&self->spinlock, flags);
72897
72898 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72899 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
72900 tty->closing = 0;
72901 self->tty = NULL;
72902
72903 - if (self->blocked_open) {
72904 + if (local_read(&self->blocked_open)) {
72905 if (self->close_delay)
72906 schedule_timeout_interruptible(self->close_delay);
72907 wake_up_interruptible(&self->open_wait);
72908 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
72909 spin_lock_irqsave(&self->spinlock, flags);
72910 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72911 self->tty = NULL;
72912 - self->open_count = 0;
72913 + local_set(&self->open_count, 0);
72914 spin_unlock_irqrestore(&self->spinlock, flags);
72915
72916 wake_up_interruptible(&self->open_wait);
72917 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
72918 seq_putc(m, '\n');
72919
72920 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72921 - seq_printf(m, "Open count: %d\n", self->open_count);
72922 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72923 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72924 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72925
72926 diff -urNp linux-3.0.7/net/iucv/af_iucv.c linux-3.0.7/net/iucv/af_iucv.c
72927 --- linux-3.0.7/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
72928 +++ linux-3.0.7/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
72929 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
72930
72931 write_lock_bh(&iucv_sk_list.lock);
72932
72933 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72934 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72935 while (__iucv_get_sock_by_name(name)) {
72936 sprintf(name, "%08x",
72937 - atomic_inc_return(&iucv_sk_list.autobind_name));
72938 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72939 }
72940
72941 write_unlock_bh(&iucv_sk_list.lock);
72942 diff -urNp linux-3.0.7/net/key/af_key.c linux-3.0.7/net/key/af_key.c
72943 --- linux-3.0.7/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
72944 +++ linux-3.0.7/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
72945 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
72946 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72947 struct xfrm_kmaddress k;
72948
72949 + pax_track_stack();
72950 +
72951 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72952 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72953 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72954 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
72955 static u32 get_acqseq(void)
72956 {
72957 u32 res;
72958 - static atomic_t acqseq;
72959 + static atomic_unchecked_t acqseq;
72960
72961 do {
72962 - res = atomic_inc_return(&acqseq);
72963 + res = atomic_inc_return_unchecked(&acqseq);
72964 } while (!res);
72965 return res;
72966 }
72967 diff -urNp linux-3.0.7/net/lapb/lapb_iface.c linux-3.0.7/net/lapb/lapb_iface.c
72968 --- linux-3.0.7/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
72969 +++ linux-3.0.7/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
72970 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
72971 goto out;
72972
72973 lapb->dev = dev;
72974 - lapb->callbacks = *callbacks;
72975 + lapb->callbacks = callbacks;
72976
72977 __lapb_insert_cb(lapb);
72978
72979 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
72980
72981 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72982 {
72983 - if (lapb->callbacks.connect_confirmation)
72984 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72985 + if (lapb->callbacks->connect_confirmation)
72986 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72987 }
72988
72989 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72990 {
72991 - if (lapb->callbacks.connect_indication)
72992 - lapb->callbacks.connect_indication(lapb->dev, reason);
72993 + if (lapb->callbacks->connect_indication)
72994 + lapb->callbacks->connect_indication(lapb->dev, reason);
72995 }
72996
72997 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72998 {
72999 - if (lapb->callbacks.disconnect_confirmation)
73000 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73001 + if (lapb->callbacks->disconnect_confirmation)
73002 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73003 }
73004
73005 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73006 {
73007 - if (lapb->callbacks.disconnect_indication)
73008 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
73009 + if (lapb->callbacks->disconnect_indication)
73010 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
73011 }
73012
73013 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73014 {
73015 - if (lapb->callbacks.data_indication)
73016 - return lapb->callbacks.data_indication(lapb->dev, skb);
73017 + if (lapb->callbacks->data_indication)
73018 + return lapb->callbacks->data_indication(lapb->dev, skb);
73019
73020 kfree_skb(skb);
73021 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73022 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73023 {
73024 int used = 0;
73025
73026 - if (lapb->callbacks.data_transmit) {
73027 - lapb->callbacks.data_transmit(lapb->dev, skb);
73028 + if (lapb->callbacks->data_transmit) {
73029 + lapb->callbacks->data_transmit(lapb->dev, skb);
73030 used = 1;
73031 }
73032
73033 diff -urNp linux-3.0.7/net/mac80211/debugfs_sta.c linux-3.0.7/net/mac80211/debugfs_sta.c
73034 --- linux-3.0.7/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
73035 +++ linux-3.0.7/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
73036 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73037 struct tid_ampdu_rx *tid_rx;
73038 struct tid_ampdu_tx *tid_tx;
73039
73040 + pax_track_stack();
73041 +
73042 rcu_read_lock();
73043
73044 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73045 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73046 struct sta_info *sta = file->private_data;
73047 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73048
73049 + pax_track_stack();
73050 +
73051 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73052 htc->ht_supported ? "" : "not ");
73053 if (htc->ht_supported) {
73054 diff -urNp linux-3.0.7/net/mac80211/ieee80211_i.h linux-3.0.7/net/mac80211/ieee80211_i.h
73055 --- linux-3.0.7/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
73056 +++ linux-3.0.7/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
73057 @@ -27,6 +27,7 @@
73058 #include <net/ieee80211_radiotap.h>
73059 #include <net/cfg80211.h>
73060 #include <net/mac80211.h>
73061 +#include <asm/local.h>
73062 #include "key.h"
73063 #include "sta_info.h"
73064
73065 @@ -721,7 +722,7 @@ struct ieee80211_local {
73066 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73067 spinlock_t queue_stop_reason_lock;
73068
73069 - int open_count;
73070 + local_t open_count;
73071 int monitors, cooked_mntrs;
73072 /* number of interfaces with corresponding FIF_ flags */
73073 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73074 diff -urNp linux-3.0.7/net/mac80211/iface.c linux-3.0.7/net/mac80211/iface.c
73075 --- linux-3.0.7/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
73076 +++ linux-3.0.7/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
73077 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73078 break;
73079 }
73080
73081 - if (local->open_count == 0) {
73082 + if (local_read(&local->open_count) == 0) {
73083 res = drv_start(local);
73084 if (res)
73085 goto err_del_bss;
73086 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73087 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73088
73089 if (!is_valid_ether_addr(dev->dev_addr)) {
73090 - if (!local->open_count)
73091 + if (!local_read(&local->open_count))
73092 drv_stop(local);
73093 return -EADDRNOTAVAIL;
73094 }
73095 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73096 mutex_unlock(&local->mtx);
73097
73098 if (coming_up)
73099 - local->open_count++;
73100 + local_inc(&local->open_count);
73101
73102 if (hw_reconf_flags) {
73103 ieee80211_hw_config(local, hw_reconf_flags);
73104 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73105 err_del_interface:
73106 drv_remove_interface(local, &sdata->vif);
73107 err_stop:
73108 - if (!local->open_count)
73109 + if (!local_read(&local->open_count))
73110 drv_stop(local);
73111 err_del_bss:
73112 sdata->bss = NULL;
73113 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
73114 }
73115
73116 if (going_down)
73117 - local->open_count--;
73118 + local_dec(&local->open_count);
73119
73120 switch (sdata->vif.type) {
73121 case NL80211_IFTYPE_AP_VLAN:
73122 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
73123
73124 ieee80211_recalc_ps(local, -1);
73125
73126 - if (local->open_count == 0) {
73127 + if (local_read(&local->open_count) == 0) {
73128 if (local->ops->napi_poll)
73129 napi_disable(&local->napi);
73130 ieee80211_clear_tx_pending(local);
73131 diff -urNp linux-3.0.7/net/mac80211/main.c linux-3.0.7/net/mac80211/main.c
73132 --- linux-3.0.7/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
73133 +++ linux-3.0.7/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
73134 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73135 local->hw.conf.power_level = power;
73136 }
73137
73138 - if (changed && local->open_count) {
73139 + if (changed && local_read(&local->open_count)) {
73140 ret = drv_config(local, changed);
73141 /*
73142 * Goal:
73143 diff -urNp linux-3.0.7/net/mac80211/mlme.c linux-3.0.7/net/mac80211/mlme.c
73144 --- linux-3.0.7/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
73145 +++ linux-3.0.7/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
73146 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
73147 bool have_higher_than_11mbit = false;
73148 u16 ap_ht_cap_flags;
73149
73150 + pax_track_stack();
73151 +
73152 /* AssocResp and ReassocResp have identical structure */
73153
73154 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73155 diff -urNp linux-3.0.7/net/mac80211/pm.c linux-3.0.7/net/mac80211/pm.c
73156 --- linux-3.0.7/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
73157 +++ linux-3.0.7/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
73158 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
73159 cancel_work_sync(&local->dynamic_ps_enable_work);
73160 del_timer_sync(&local->dynamic_ps_timer);
73161
73162 - local->wowlan = wowlan && local->open_count;
73163 + local->wowlan = wowlan && local_read(&local->open_count);
73164 if (local->wowlan) {
73165 int err = drv_suspend(local, wowlan);
73166 if (err) {
73167 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
73168 }
73169
73170 /* stop hardware - this must stop RX */
73171 - if (local->open_count)
73172 + if (local_read(&local->open_count))
73173 ieee80211_stop_device(local);
73174
73175 suspend:
73176 diff -urNp linux-3.0.7/net/mac80211/rate.c linux-3.0.7/net/mac80211/rate.c
73177 --- linux-3.0.7/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
73178 +++ linux-3.0.7/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
73179 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73180
73181 ASSERT_RTNL();
73182
73183 - if (local->open_count)
73184 + if (local_read(&local->open_count))
73185 return -EBUSY;
73186
73187 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73188 diff -urNp linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c
73189 --- linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
73190 +++ linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
73191 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73192
73193 spin_unlock_irqrestore(&events->lock, status);
73194
73195 - if (copy_to_user(buf, pb, p))
73196 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73197 return -EFAULT;
73198
73199 return p;
73200 diff -urNp linux-3.0.7/net/mac80211/util.c linux-3.0.7/net/mac80211/util.c
73201 --- linux-3.0.7/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
73202 +++ linux-3.0.7/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
73203 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
73204 #endif
73205
73206 /* restart hardware */
73207 - if (local->open_count) {
73208 + if (local_read(&local->open_count)) {
73209 /*
73210 * Upon resume hardware can sometimes be goofy due to
73211 * various platform / driver / bus issues, so restarting
73212 diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c
73213 --- linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
73214 +++ linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
73215 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73216 /* Increase the refcnt counter of the dest */
73217 atomic_inc(&dest->refcnt);
73218
73219 - conn_flags = atomic_read(&dest->conn_flags);
73220 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
73221 if (cp->protocol != IPPROTO_UDP)
73222 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73223 /* Bind with the destination and its corresponding transmitter */
73224 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73225 atomic_set(&cp->refcnt, 1);
73226
73227 atomic_set(&cp->n_control, 0);
73228 - atomic_set(&cp->in_pkts, 0);
73229 + atomic_set_unchecked(&cp->in_pkts, 0);
73230
73231 atomic_inc(&ipvs->conn_count);
73232 if (flags & IP_VS_CONN_F_NO_CPORT)
73233 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73234
73235 /* Don't drop the entry if its number of incoming packets is not
73236 located in [0, 8] */
73237 - i = atomic_read(&cp->in_pkts);
73238 + i = atomic_read_unchecked(&cp->in_pkts);
73239 if (i > 8 || i < 0) return 0;
73240
73241 if (!todrop_rate[i]) return 0;
73242 diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c
73243 --- linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
73244 +++ linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
73245 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73246 ret = cp->packet_xmit(skb, cp, pd->pp);
73247 /* do not touch skb anymore */
73248
73249 - atomic_inc(&cp->in_pkts);
73250 + atomic_inc_unchecked(&cp->in_pkts);
73251 ip_vs_conn_put(cp);
73252 return ret;
73253 }
73254 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73255 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73256 pkts = sysctl_sync_threshold(ipvs);
73257 else
73258 - pkts = atomic_add_return(1, &cp->in_pkts);
73259 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73260
73261 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73262 cp->protocol == IPPROTO_SCTP) {
73263 diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c
73264 --- linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
73265 +++ linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
73266 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73267 ip_vs_rs_hash(ipvs, dest);
73268 write_unlock_bh(&ipvs->rs_lock);
73269 }
73270 - atomic_set(&dest->conn_flags, conn_flags);
73271 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
73272
73273 /* bind the service */
73274 if (!dest->svc) {
73275 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73276 " %-7s %-6d %-10d %-10d\n",
73277 &dest->addr.in6,
73278 ntohs(dest->port),
73279 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73280 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73281 atomic_read(&dest->weight),
73282 atomic_read(&dest->activeconns),
73283 atomic_read(&dest->inactconns));
73284 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73285 "%-7s %-6d %-10d %-10d\n",
73286 ntohl(dest->addr.ip),
73287 ntohs(dest->port),
73288 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73289 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73290 atomic_read(&dest->weight),
73291 atomic_read(&dest->activeconns),
73292 atomic_read(&dest->inactconns));
73293 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73294 struct ip_vs_dest_user *udest_compat;
73295 struct ip_vs_dest_user_kern udest;
73296
73297 + pax_track_stack();
73298 +
73299 if (!capable(CAP_NET_ADMIN))
73300 return -EPERM;
73301
73302 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
73303
73304 entry.addr = dest->addr.ip;
73305 entry.port = dest->port;
73306 - entry.conn_flags = atomic_read(&dest->conn_flags);
73307 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73308 entry.weight = atomic_read(&dest->weight);
73309 entry.u_threshold = dest->u_threshold;
73310 entry.l_threshold = dest->l_threshold;
73311 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
73312 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73313
73314 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73315 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73316 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73317 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73318 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73319 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73320 diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c
73321 --- linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
73322 +++ linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
73323 @@ -648,7 +648,7 @@ control:
73324 * i.e only increment in_pkts for Templates.
73325 */
73326 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73327 - int pkts = atomic_add_return(1, &cp->in_pkts);
73328 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73329
73330 if (pkts % sysctl_sync_period(ipvs) != 1)
73331 return;
73332 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
73333
73334 if (opt)
73335 memcpy(&cp->in_seq, opt, sizeof(*opt));
73336 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73337 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73338 cp->state = state;
73339 cp->old_state = cp->state;
73340 /*
73341 diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c
73342 --- linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
73343 +++ linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
73344 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73345 else
73346 rc = NF_ACCEPT;
73347 /* do not touch skb anymore */
73348 - atomic_inc(&cp->in_pkts);
73349 + atomic_inc_unchecked(&cp->in_pkts);
73350 goto out;
73351 }
73352
73353 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73354 else
73355 rc = NF_ACCEPT;
73356 /* do not touch skb anymore */
73357 - atomic_inc(&cp->in_pkts);
73358 + atomic_inc_unchecked(&cp->in_pkts);
73359 goto out;
73360 }
73361
73362 diff -urNp linux-3.0.7/net/netfilter/Kconfig linux-3.0.7/net/netfilter/Kconfig
73363 --- linux-3.0.7/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
73364 +++ linux-3.0.7/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
73365 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73366
73367 To compile it as a module, choose M here. If unsure, say N.
73368
73369 +config NETFILTER_XT_MATCH_GRADM
73370 + tristate '"gradm" match support'
73371 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73372 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73373 + ---help---
73374 + The gradm match allows matching on whether the grsecurity RBAC
73375 + system is enabled. It is useful when iptables rules are applied
73376 + early during boot to prevent connections to the machine (except
73377 + from a trusted host) while the RBAC system is still disabled.
73378 +
73379 config NETFILTER_XT_MATCH_HASHLIMIT
73380 tristate '"hashlimit" match support'
73381 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
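
As a rough sketch of how the gradm match described in the help text above might be used from userspace: the kernel-side logic (see net/netfilter/xt_gradm.c later in this patch) returns gr_acl_is_enabled() XOR info->invflags, so an inverted rule fires only while the RBAC policy is not yet loaded. The option name (--disabled) and the trusted address below are assumptions for illustration; the real option names come from the libxt_gradm iptables extension shipped with the gradm userspace tools, not from this kernel patch.

    # Assumed userspace syntax -- libxt_gradm is not part of this patch.
    iptables -A INPUT -s 192.168.1.10 -j ACCEPT      # example trusted admin host
    iptables -A INPUT -m gradm --disabled -j DROP    # drop everything else until RBAC is up
    # After the policy is enabled (gradm -E), the inverted match stops
    # firing and normal traffic is accepted again.
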
73382 diff -urNp linux-3.0.7/net/netfilter/Makefile linux-3.0.7/net/netfilter/Makefile
73383 --- linux-3.0.7/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
73384 +++ linux-3.0.7/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
73385 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73386 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73387 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73388 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73389 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73390 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73391 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73392 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73393 diff -urNp linux-3.0.7/net/netfilter/nfnetlink_log.c linux-3.0.7/net/netfilter/nfnetlink_log.c
73394 --- linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
73395 +++ linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
73396 @@ -70,7 +70,7 @@ struct nfulnl_instance {
73397 };
73398
73399 static DEFINE_SPINLOCK(instances_lock);
73400 -static atomic_t global_seq;
73401 +static atomic_unchecked_t global_seq;
73402
73403 #define INSTANCE_BUCKETS 16
73404 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73405 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73406 /* global sequence number */
73407 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73408 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73409 - htonl(atomic_inc_return(&global_seq)));
73410 + htonl(atomic_inc_return_unchecked(&global_seq)));
73411
73412 if (data_len) {
73413 struct nlattr *nla;
73414 diff -urNp linux-3.0.7/net/netfilter/nfnetlink_queue.c linux-3.0.7/net/netfilter/nfnetlink_queue.c
73415 --- linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
73416 +++ linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
73417 @@ -58,7 +58,7 @@ struct nfqnl_instance {
73418 */
73419 spinlock_t lock;
73420 unsigned int queue_total;
73421 - atomic_t id_sequence; /* 'sequence' of pkt ids */
73422 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
73423 struct list_head queue_list; /* packets in queue */
73424 };
73425
73426 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
73427 nfmsg->version = NFNETLINK_V0;
73428 nfmsg->res_id = htons(queue->queue_num);
73429
73430 - entry->id = atomic_inc_return(&queue->id_sequence);
73431 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
73432 pmsg.packet_id = htonl(entry->id);
73433 pmsg.hw_protocol = entskb->protocol;
73434 pmsg.hook = entry->hook;
73435 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
73436 inst->peer_pid, inst->queue_total,
73437 inst->copy_mode, inst->copy_range,
73438 inst->queue_dropped, inst->queue_user_dropped,
73439 - atomic_read(&inst->id_sequence), 1);
73440 + atomic_read_unchecked(&inst->id_sequence), 1);
73441 }
73442
73443 static const struct seq_operations nfqnl_seq_ops = {
73444 diff -urNp linux-3.0.7/net/netfilter/xt_gradm.c linux-3.0.7/net/netfilter/xt_gradm.c
73445 --- linux-3.0.7/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73446 +++ linux-3.0.7/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
73447 @@ -0,0 +1,51 @@
73448 +/*
73449 + * gradm match for netfilter
73450 + * Copyright © Zbigniew Krzystolik, 2010
73451 + *
73452 + * This program is free software; you can redistribute it and/or modify
73453 + * it under the terms of the GNU General Public License; either version
73454 + * 2 or 3 as published by the Free Software Foundation.
73455 + */
73456 +#include <linux/module.h>
73457 +#include <linux/moduleparam.h>
73458 +#include <linux/skbuff.h>
73459 +#include <linux/netfilter/x_tables.h>
73460 +#include <linux/grsecurity.h>
73461 +#include <linux/netfilter/xt_gradm.h>
73462 +
73463 +static bool
73464 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
73465 +{
73466 + const struct xt_gradm_mtinfo *info = par->matchinfo;
73467 + bool retval = false;
73468 + if (gr_acl_is_enabled())
73469 + retval = true;
73470 + return retval ^ info->invflags;
73471 +}
73472 +
73473 +static struct xt_match gradm_mt_reg __read_mostly = {
73474 + .name = "gradm",
73475 + .revision = 0,
73476 + .family = NFPROTO_UNSPEC,
73477 + .match = gradm_mt,
73478 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
73479 + .me = THIS_MODULE,
73480 +};
73481 +
73482 +static int __init gradm_mt_init(void)
73483 +{
73484 + return xt_register_match(&gradm_mt_reg);
73485 +}
73486 +
73487 +static void __exit gradm_mt_exit(void)
73488 +{
73489 + xt_unregister_match(&gradm_mt_reg);
73490 +}
73491 +
73492 +module_init(gradm_mt_init);
73493 +module_exit(gradm_mt_exit);
73494 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
73495 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
73496 +MODULE_LICENSE("GPL");
73497 +MODULE_ALIAS("ipt_gradm");
73498 +MODULE_ALIAS("ip6t_gradm");
73499 diff -urNp linux-3.0.7/net/netfilter/xt_statistic.c linux-3.0.7/net/netfilter/xt_statistic.c
73500 --- linux-3.0.7/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
73501 +++ linux-3.0.7/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
73502 @@ -18,7 +18,7 @@
73503 #include <linux/netfilter/x_tables.h>
73504
73505 struct xt_statistic_priv {
73506 - atomic_t count;
73507 + atomic_unchecked_t count;
73508 } ____cacheline_aligned_in_smp;
73509
73510 MODULE_LICENSE("GPL");
73511 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
73512 break;
73513 case XT_STATISTIC_MODE_NTH:
73514 do {
73515 - oval = atomic_read(&info->master->count);
73516 + oval = atomic_read_unchecked(&info->master->count);
73517 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
73518 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
73519 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
73520 if (nval == 0)
73521 ret = !ret;
73522 break;
73523 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
73524 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
73525 if (info->master == NULL)
73526 return -ENOMEM;
73527 - atomic_set(&info->master->count, info->u.nth.count);
73528 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
73529
73530 return 0;
73531 }
73532 diff -urNp linux-3.0.7/net/netlink/af_netlink.c linux-3.0.7/net/netlink/af_netlink.c
73533 --- linux-3.0.7/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
73534 +++ linux-3.0.7/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
73535 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
73536 sk->sk_error_report(sk);
73537 }
73538 }
73539 - atomic_inc(&sk->sk_drops);
73540 + atomic_inc_unchecked(&sk->sk_drops);
73541 }
73542
73543 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
73544 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
73545 sk_wmem_alloc_get(s),
73546 nlk->cb,
73547 atomic_read(&s->sk_refcnt),
73548 - atomic_read(&s->sk_drops),
73549 + atomic_read_unchecked(&s->sk_drops),
73550 sock_i_ino(s)
73551 );
73552
73553 diff -urNp linux-3.0.7/net/netrom/af_netrom.c linux-3.0.7/net/netrom/af_netrom.c
73554 --- linux-3.0.7/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
73555 +++ linux-3.0.7/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
73556 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
73557 struct sock *sk = sock->sk;
73558 struct nr_sock *nr = nr_sk(sk);
73559
73560 + memset(sax, 0, sizeof(*sax));
73561 lock_sock(sk);
73562 if (peer != 0) {
73563 if (sk->sk_state != TCP_ESTABLISHED) {
73564 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
73565 *uaddr_len = sizeof(struct full_sockaddr_ax25);
73566 } else {
73567 sax->fsa_ax25.sax25_family = AF_NETROM;
73568 - sax->fsa_ax25.sax25_ndigis = 0;
73569 sax->fsa_ax25.sax25_call = nr->source_addr;
73570 *uaddr_len = sizeof(struct sockaddr_ax25);
73571 }
73572 diff -urNp linux-3.0.7/net/packet/af_packet.c linux-3.0.7/net/packet/af_packet.c
73573 --- linux-3.0.7/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
73574 +++ linux-3.0.7/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
73575 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
73576
73577 spin_lock(&sk->sk_receive_queue.lock);
73578 po->stats.tp_packets++;
73579 - skb->dropcount = atomic_read(&sk->sk_drops);
73580 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
73581 __skb_queue_tail(&sk->sk_receive_queue, skb);
73582 spin_unlock(&sk->sk_receive_queue.lock);
73583 sk->sk_data_ready(sk, skb->len);
73584 return 0;
73585
73586 drop_n_acct:
73587 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
73588 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
73589
73590 drop_n_restore:
73591 if (skb_head != skb->data && skb_shared(skb)) {
73592 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
73593 case PACKET_HDRLEN:
73594 if (len > sizeof(int))
73595 len = sizeof(int);
73596 - if (copy_from_user(&val, optval, len))
73597 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
73598 return -EFAULT;
73599 switch (val) {
73600 case TPACKET_V1:
73601 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
73602
73603 if (put_user(len, optlen))
73604 return -EFAULT;
73605 - if (copy_to_user(optval, data, len))
73606 + if (len > sizeof(st) || copy_to_user(optval, data, len))
73607 return -EFAULT;
73608 return 0;
73609 }
73610 diff -urNp linux-3.0.7/net/phonet/af_phonet.c linux-3.0.7/net/phonet/af_phonet.c
73611 --- linux-3.0.7/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
73612 +++ linux-3.0.7/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
73613 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73614 {
73615 struct phonet_protocol *pp;
73616
73617 - if (protocol >= PHONET_NPROTO)
73618 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73619 return NULL;
73620
73621 rcu_read_lock();
73622 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
73623 {
73624 int err = 0;
73625
73626 - if (protocol >= PHONET_NPROTO)
73627 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73628 return -EINVAL;
73629
73630 err = proto_register(pp->prot, 1);
73631 diff -urNp linux-3.0.7/net/phonet/pep.c linux-3.0.7/net/phonet/pep.c
73632 --- linux-3.0.7/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
73633 +++ linux-3.0.7/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
73634 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
73635
73636 case PNS_PEP_CTRL_REQ:
73637 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73638 - atomic_inc(&sk->sk_drops);
73639 + atomic_inc_unchecked(&sk->sk_drops);
73640 break;
73641 }
73642 __skb_pull(skb, 4);
73643 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
73644 }
73645
73646 if (pn->rx_credits == 0) {
73647 - atomic_inc(&sk->sk_drops);
73648 + atomic_inc_unchecked(&sk->sk_drops);
73649 err = -ENOBUFS;
73650 break;
73651 }
73652 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
73653 }
73654
73655 if (pn->rx_credits == 0) {
73656 - atomic_inc(&sk->sk_drops);
73657 + atomic_inc_unchecked(&sk->sk_drops);
73658 err = NET_RX_DROP;
73659 break;
73660 }
73661 diff -urNp linux-3.0.7/net/phonet/socket.c linux-3.0.7/net/phonet/socket.c
73662 --- linux-3.0.7/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
73663 +++ linux-3.0.7/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
73664 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
73665 pn->resource, sk->sk_state,
73666 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73667 sock_i_uid(sk), sock_i_ino(sk),
73668 - atomic_read(&sk->sk_refcnt), sk,
73669 - atomic_read(&sk->sk_drops), &len);
73670 + atomic_read(&sk->sk_refcnt),
73671 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73672 + NULL,
73673 +#else
73674 + sk,
73675 +#endif
73676 + atomic_read_unchecked(&sk->sk_drops), &len);
73677 }
73678 seq_printf(seq, "%*s\n", 127 - len, "");
73679 return 0;
73680 diff -urNp linux-3.0.7/net/rds/cong.c linux-3.0.7/net/rds/cong.c
73681 --- linux-3.0.7/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
73682 +++ linux-3.0.7/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
73683 @@ -77,7 +77,7 @@
73684 * finds that the saved generation number is smaller than the global generation
73685 * number, it wakes up the process.
73686 */
73687 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73688 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73689
73690 /*
73691 * Congestion monitoring
73692 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73693 rdsdebug("waking map %p for %pI4\n",
73694 map, &map->m_addr);
73695 rds_stats_inc(s_cong_update_received);
73696 - atomic_inc(&rds_cong_generation);
73697 + atomic_inc_unchecked(&rds_cong_generation);
73698 if (waitqueue_active(&map->m_waitq))
73699 wake_up(&map->m_waitq);
73700 if (waitqueue_active(&rds_poll_waitq))
73701 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73702
73703 int rds_cong_updated_since(unsigned long *recent)
73704 {
73705 - unsigned long gen = atomic_read(&rds_cong_generation);
73706 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73707
73708 if (likely(*recent == gen))
73709 return 0;
73710 diff -urNp linux-3.0.7/net/rds/ib_cm.c linux-3.0.7/net/rds/ib_cm.c
73711 --- linux-3.0.7/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
73712 +++ linux-3.0.7/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
73713 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
73714 /* Clear the ACK state */
73715 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
73716 #ifdef KERNEL_HAS_ATOMIC64
73717 - atomic64_set(&ic->i_ack_next, 0);
73718 + atomic64_set_unchecked(&ic->i_ack_next, 0);
73719 #else
73720 ic->i_ack_next = 0;
73721 #endif
73722 diff -urNp linux-3.0.7/net/rds/ib.h linux-3.0.7/net/rds/ib.h
73723 --- linux-3.0.7/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
73724 +++ linux-3.0.7/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
73725 @@ -127,7 +127,7 @@ struct rds_ib_connection {
73726 /* sending acks */
73727 unsigned long i_ack_flags;
73728 #ifdef KERNEL_HAS_ATOMIC64
73729 - atomic64_t i_ack_next; /* next ACK to send */
73730 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
73731 #else
73732 spinlock_t i_ack_lock; /* protect i_ack_next */
73733 u64 i_ack_next; /* next ACK to send */
73734 diff -urNp linux-3.0.7/net/rds/ib_recv.c linux-3.0.7/net/rds/ib_recv.c
73735 --- linux-3.0.7/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
73736 +++ linux-3.0.7/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
73737 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
73738 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
73739 int ack_required)
73740 {
73741 - atomic64_set(&ic->i_ack_next, seq);
73742 + atomic64_set_unchecked(&ic->i_ack_next, seq);
73743 if (ack_required) {
73744 smp_mb__before_clear_bit();
73745 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
73746 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
73747 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
73748 smp_mb__after_clear_bit();
73749
73750 - return atomic64_read(&ic->i_ack_next);
73751 + return atomic64_read_unchecked(&ic->i_ack_next);
73752 }
73753 #endif
73754
73755 diff -urNp linux-3.0.7/net/rds/iw_cm.c linux-3.0.7/net/rds/iw_cm.c
73756 --- linux-3.0.7/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
73757 +++ linux-3.0.7/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
73758 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
73759 /* Clear the ACK state */
73760 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
73761 #ifdef KERNEL_HAS_ATOMIC64
73762 - atomic64_set(&ic->i_ack_next, 0);
73763 + atomic64_set_unchecked(&ic->i_ack_next, 0);
73764 #else
73765 ic->i_ack_next = 0;
73766 #endif
73767 diff -urNp linux-3.0.7/net/rds/iw.h linux-3.0.7/net/rds/iw.h
73768 --- linux-3.0.7/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
73769 +++ linux-3.0.7/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
73770 @@ -133,7 +133,7 @@ struct rds_iw_connection {
73771 /* sending acks */
73772 unsigned long i_ack_flags;
73773 #ifdef KERNEL_HAS_ATOMIC64
73774 - atomic64_t i_ack_next; /* next ACK to send */
73775 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
73776 #else
73777 spinlock_t i_ack_lock; /* protect i_ack_next */
73778 u64 i_ack_next; /* next ACK to send */
73779 diff -urNp linux-3.0.7/net/rds/iw_rdma.c linux-3.0.7/net/rds/iw_rdma.c
73780 --- linux-3.0.7/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
73781 +++ linux-3.0.7/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
73782 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73783 struct rdma_cm_id *pcm_id;
73784 int rc;
73785
73786 + pax_track_stack();
73787 +
73788 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73789 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73790
73791 diff -urNp linux-3.0.7/net/rds/iw_recv.c linux-3.0.7/net/rds/iw_recv.c
73792 --- linux-3.0.7/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
73793 +++ linux-3.0.7/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
73794 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
73795 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
73796 int ack_required)
73797 {
73798 - atomic64_set(&ic->i_ack_next, seq);
73799 + atomic64_set_unchecked(&ic->i_ack_next, seq);
73800 if (ack_required) {
73801 smp_mb__before_clear_bit();
73802 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
73803 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
73804 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
73805 smp_mb__after_clear_bit();
73806
73807 - return atomic64_read(&ic->i_ack_next);
73808 + return atomic64_read_unchecked(&ic->i_ack_next);
73809 }
73810 #endif
73811
73812 diff -urNp linux-3.0.7/net/rds/tcp.c linux-3.0.7/net/rds/tcp.c
73813 --- linux-3.0.7/net/rds/tcp.c 2011-07-21 22:17:23.000000000 -0400
73814 +++ linux-3.0.7/net/rds/tcp.c 2011-10-06 04:17:55.000000000 -0400
73815 @@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock
73816 int val = 1;
73817
73818 set_fs(KERNEL_DS);
73819 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
73820 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
73821 sizeof(val));
73822 set_fs(oldfs);
73823 }
73824 diff -urNp linux-3.0.7/net/rds/tcp_send.c linux-3.0.7/net/rds/tcp_send.c
73825 --- linux-3.0.7/net/rds/tcp_send.c 2011-07-21 22:17:23.000000000 -0400
73826 +++ linux-3.0.7/net/rds/tcp_send.c 2011-10-06 04:17:55.000000000 -0400
73827 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *
73828
73829 oldfs = get_fs();
73830 set_fs(KERNEL_DS);
73831 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
73832 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
73833 sizeof(val));
73834 set_fs(oldfs);
73835 }
73836 diff -urNp linux-3.0.7/net/rxrpc/af_rxrpc.c linux-3.0.7/net/rxrpc/af_rxrpc.c
73837 --- linux-3.0.7/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
73838 +++ linux-3.0.7/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
73839 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
73840 __be32 rxrpc_epoch;
73841
73842 /* current debugging ID */
73843 -atomic_t rxrpc_debug_id;
73844 +atomic_unchecked_t rxrpc_debug_id;
73845
73846 /* count of skbs currently in use */
73847 atomic_t rxrpc_n_skbs;
73848 diff -urNp linux-3.0.7/net/rxrpc/ar-ack.c linux-3.0.7/net/rxrpc/ar-ack.c
73849 --- linux-3.0.7/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
73850 +++ linux-3.0.7/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
73851 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
73852
73853 _enter("{%d,%d,%d,%d},",
73854 call->acks_hard, call->acks_unacked,
73855 - atomic_read(&call->sequence),
73856 + atomic_read_unchecked(&call->sequence),
73857 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73858
73859 stop = 0;
73860 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
73861
73862 /* each Tx packet has a new serial number */
73863 sp->hdr.serial =
73864 - htonl(atomic_inc_return(&call->conn->serial));
73865 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73866
73867 hdr = (struct rxrpc_header *) txb->head;
73868 hdr->serial = sp->hdr.serial;
73869 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
73870 */
73871 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73872 {
73873 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73874 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73875 }
73876
73877 /*
73878 @@ -629,7 +629,7 @@ process_further:
73879
73880 latest = ntohl(sp->hdr.serial);
73881 hard = ntohl(ack.firstPacket);
73882 - tx = atomic_read(&call->sequence);
73883 + tx = atomic_read_unchecked(&call->sequence);
73884
73885 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73886 latest,
73887 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
73888 u32 abort_code = RX_PROTOCOL_ERROR;
73889 u8 *acks = NULL;
73890
73891 + pax_track_stack();
73892 +
73893 //printk("\n--------------------\n");
73894 _enter("{%d,%s,%lx} [%lu]",
73895 call->debug_id, rxrpc_call_states[call->state], call->events,
73896 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
73897 goto maybe_reschedule;
73898
73899 send_ACK_with_skew:
73900 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73901 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73902 ntohl(ack.serial));
73903 send_ACK:
73904 mtu = call->conn->trans->peer->if_mtu;
73905 @@ -1173,7 +1175,7 @@ send_ACK:
73906 ackinfo.rxMTU = htonl(5692);
73907 ackinfo.jumbo_max = htonl(4);
73908
73909 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73910 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73911 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73912 ntohl(hdr.serial),
73913 ntohs(ack.maxSkew),
73914 @@ -1191,7 +1193,7 @@ send_ACK:
73915 send_message:
73916 _debug("send message");
73917
73918 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73919 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73920 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73921 send_message_2:
73922
73923 diff -urNp linux-3.0.7/net/rxrpc/ar-call.c linux-3.0.7/net/rxrpc/ar-call.c
73924 --- linux-3.0.7/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
73925 +++ linux-3.0.7/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
73926 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73927 spin_lock_init(&call->lock);
73928 rwlock_init(&call->state_lock);
73929 atomic_set(&call->usage, 1);
73930 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73931 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73932 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73933
73934 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73935 diff -urNp linux-3.0.7/net/rxrpc/ar-connection.c linux-3.0.7/net/rxrpc/ar-connection.c
73936 --- linux-3.0.7/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
73937 +++ linux-3.0.7/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
73938 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
73939 rwlock_init(&conn->lock);
73940 spin_lock_init(&conn->state_lock);
73941 atomic_set(&conn->usage, 1);
73942 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73943 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73944 conn->avail_calls = RXRPC_MAXCALLS;
73945 conn->size_align = 4;
73946 conn->header_size = sizeof(struct rxrpc_header);
73947 diff -urNp linux-3.0.7/net/rxrpc/ar-connevent.c linux-3.0.7/net/rxrpc/ar-connevent.c
73948 --- linux-3.0.7/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
73949 +++ linux-3.0.7/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
73950 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73951
73952 len = iov[0].iov_len + iov[1].iov_len;
73953
73954 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73955 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73956 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73957
73958 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73959 diff -urNp linux-3.0.7/net/rxrpc/ar-input.c linux-3.0.7/net/rxrpc/ar-input.c
73960 --- linux-3.0.7/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
73961 +++ linux-3.0.7/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
73962 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
73963 /* track the latest serial number on this connection for ACK packet
73964 * information */
73965 serial = ntohl(sp->hdr.serial);
73966 - hi_serial = atomic_read(&call->conn->hi_serial);
73967 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73968 while (serial > hi_serial)
73969 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73970 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73971 serial);
73972
73973 /* request ACK generation for any ACK or DATA packet that requests
73974 diff -urNp linux-3.0.7/net/rxrpc/ar-internal.h linux-3.0.7/net/rxrpc/ar-internal.h
73975 --- linux-3.0.7/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
73976 +++ linux-3.0.7/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
73977 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73978 int error; /* error code for local abort */
73979 int debug_id; /* debug ID for printks */
73980 unsigned call_counter; /* call ID counter */
73981 - atomic_t serial; /* packet serial number counter */
73982 - atomic_t hi_serial; /* highest serial number received */
73983 + atomic_unchecked_t serial; /* packet serial number counter */
73984 + atomic_unchecked_t hi_serial; /* highest serial number received */
73985 u8 avail_calls; /* number of calls available */
73986 u8 size_align; /* data size alignment (for security) */
73987 u8 header_size; /* rxrpc + security header size */
73988 @@ -346,7 +346,7 @@ struct rxrpc_call {
73989 spinlock_t lock;
73990 rwlock_t state_lock; /* lock for state transition */
73991 atomic_t usage;
73992 - atomic_t sequence; /* Tx data packet sequence counter */
73993 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73994 u32 abort_code; /* local/remote abort code */
73995 enum { /* current state of call */
73996 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73997 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73998 */
73999 extern atomic_t rxrpc_n_skbs;
74000 extern __be32 rxrpc_epoch;
74001 -extern atomic_t rxrpc_debug_id;
74002 +extern atomic_unchecked_t rxrpc_debug_id;
74003 extern struct workqueue_struct *rxrpc_workqueue;
74004
74005 /*
74006 diff -urNp linux-3.0.7/net/rxrpc/ar-local.c linux-3.0.7/net/rxrpc/ar-local.c
74007 --- linux-3.0.7/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
74008 +++ linux-3.0.7/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
74009 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
74010 spin_lock_init(&local->lock);
74011 rwlock_init(&local->services_lock);
74012 atomic_set(&local->usage, 1);
74013 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74014 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74015 memcpy(&local->srx, srx, sizeof(*srx));
74016 }
74017
74018 diff -urNp linux-3.0.7/net/rxrpc/ar-output.c linux-3.0.7/net/rxrpc/ar-output.c
74019 --- linux-3.0.7/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
74020 +++ linux-3.0.7/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
74021 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
74022 sp->hdr.cid = call->cid;
74023 sp->hdr.callNumber = call->call_id;
74024 sp->hdr.seq =
74025 - htonl(atomic_inc_return(&call->sequence));
74026 + htonl(atomic_inc_return_unchecked(&call->sequence));
74027 sp->hdr.serial =
74028 - htonl(atomic_inc_return(&conn->serial));
74029 + htonl(atomic_inc_return_unchecked(&conn->serial));
74030 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74031 sp->hdr.userStatus = 0;
74032 sp->hdr.securityIndex = conn->security_ix;
74033 diff -urNp linux-3.0.7/net/rxrpc/ar-peer.c linux-3.0.7/net/rxrpc/ar-peer.c
74034 --- linux-3.0.7/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
74035 +++ linux-3.0.7/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
74036 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
74037 INIT_LIST_HEAD(&peer->error_targets);
74038 spin_lock_init(&peer->lock);
74039 atomic_set(&peer->usage, 1);
74040 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74041 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74042 memcpy(&peer->srx, srx, sizeof(*srx));
74043
74044 rxrpc_assess_MTU_size(peer);
74045 diff -urNp linux-3.0.7/net/rxrpc/ar-proc.c linux-3.0.7/net/rxrpc/ar-proc.c
74046 --- linux-3.0.7/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
74047 +++ linux-3.0.7/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
74048 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
74049 atomic_read(&conn->usage),
74050 rxrpc_conn_states[conn->state],
74051 key_serial(conn->key),
74052 - atomic_read(&conn->serial),
74053 - atomic_read(&conn->hi_serial));
74054 + atomic_read_unchecked(&conn->serial),
74055 + atomic_read_unchecked(&conn->hi_serial));
74056
74057 return 0;
74058 }
74059 diff -urNp linux-3.0.7/net/rxrpc/ar-transport.c linux-3.0.7/net/rxrpc/ar-transport.c
74060 --- linux-3.0.7/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
74061 +++ linux-3.0.7/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
74062 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
74063 spin_lock_init(&trans->client_lock);
74064 rwlock_init(&trans->conn_lock);
74065 atomic_set(&trans->usage, 1);
74066 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74067 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74068
74069 if (peer->srx.transport.family == AF_INET) {
74070 switch (peer->srx.transport_type) {
74071 diff -urNp linux-3.0.7/net/rxrpc/rxkad.c linux-3.0.7/net/rxrpc/rxkad.c
74072 --- linux-3.0.7/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
74073 +++ linux-3.0.7/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
74074 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
74075 u16 check;
74076 int nsg;
74077
74078 + pax_track_stack();
74079 +
74080 sp = rxrpc_skb(skb);
74081
74082 _enter("");
74083 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
74084 u16 check;
74085 int nsg;
74086
74087 + pax_track_stack();
74088 +
74089 _enter("");
74090
74091 sp = rxrpc_skb(skb);
74092 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
74093
74094 len = iov[0].iov_len + iov[1].iov_len;
74095
74096 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74097 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74098 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74099
74100 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74101 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
74102
74103 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74104
74105 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74106 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74107 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74108
74109 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74110 diff -urNp linux-3.0.7/net/sctp/proc.c linux-3.0.7/net/sctp/proc.c
74111 --- linux-3.0.7/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
74112 +++ linux-3.0.7/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
74113 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
74114 seq_printf(seq,
74115 "%8pK %8pK %-3d %-3d %-2d %-4d "
74116 "%4d %8d %8d %7d %5lu %-5d %5d ",
74117 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74118 + assoc, sk,
74119 + sctp_sk(sk)->type, sk->sk_state,
74120 assoc->state, hash,
74121 assoc->assoc_id,
74122 assoc->sndbuf_used,
74123 diff -urNp linux-3.0.7/net/sctp/socket.c linux-3.0.7/net/sctp/socket.c
74124 --- linux-3.0.7/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
74125 +++ linux-3.0.7/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
74126 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
74127 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74128 if (space_left < addrlen)
74129 return -ENOMEM;
74130 - if (copy_to_user(to, &temp, addrlen))
74131 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74132 return -EFAULT;
74133 to += addrlen;
74134 cnt++;
74135 diff -urNp linux-3.0.7/net/socket.c linux-3.0.7/net/socket.c
74136 --- linux-3.0.7/net/socket.c 2011-10-16 21:54:54.000000000 -0400
74137 +++ linux-3.0.7/net/socket.c 2011-10-16 21:55:28.000000000 -0400
74138 @@ -88,6 +88,7 @@
74139 #include <linux/nsproxy.h>
74140 #include <linux/magic.h>
74141 #include <linux/slab.h>
74142 +#include <linux/in.h>
74143
74144 #include <asm/uaccess.h>
74145 #include <asm/unistd.h>
74146 @@ -105,6 +106,8 @@
74147 #include <linux/sockios.h>
74148 #include <linux/atalk.h>
74149
74150 +#include <linux/grsock.h>
74151 +
74152 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74153 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74154 unsigned long nr_segs, loff_t pos);
74155 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
74156 &sockfs_dentry_operations, SOCKFS_MAGIC);
74157 }
74158
74159 -static struct vfsmount *sock_mnt __read_mostly;
74160 +struct vfsmount *sock_mnt __read_mostly;
74161
74162 static struct file_system_type sock_fs_type = {
74163 .name = "sockfs",
74164 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
74165 return -EAFNOSUPPORT;
74166 if (type < 0 || type >= SOCK_MAX)
74167 return -EINVAL;
74168 + if (protocol < 0)
74169 + return -EINVAL;
74170
74171 /* Compatibility.
74172
74173 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
74174 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74175 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74176
74177 + if(!gr_search_socket(family, type, protocol)) {
74178 + retval = -EACCES;
74179 + goto out;
74180 + }
74181 +
74182 + if (gr_handle_sock_all(family, type, protocol)) {
74183 + retval = -EACCES;
74184 + goto out;
74185 + }
74186 +
74187 retval = sock_create(family, type, protocol, &sock);
74188 if (retval < 0)
74189 goto out;
74190 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74191 if (sock) {
74192 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74193 if (err >= 0) {
74194 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74195 + err = -EACCES;
74196 + goto error;
74197 + }
74198 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74199 + if (err)
74200 + goto error;
74201 +
74202 err = security_socket_bind(sock,
74203 (struct sockaddr *)&address,
74204 addrlen);
74205 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74206 (struct sockaddr *)
74207 &address, addrlen);
74208 }
74209 +error:
74210 fput_light(sock->file, fput_needed);
74211 }
74212 return err;
74213 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
74214 if ((unsigned)backlog > somaxconn)
74215 backlog = somaxconn;
74216
74217 + if (gr_handle_sock_server_other(sock->sk)) {
74218 + err = -EPERM;
74219 + goto error;
74220 + }
74221 +
74222 + err = gr_search_listen(sock);
74223 + if (err)
74224 + goto error;
74225 +
74226 err = security_socket_listen(sock, backlog);
74227 if (!err)
74228 err = sock->ops->listen(sock, backlog);
74229
74230 +error:
74231 fput_light(sock->file, fput_needed);
74232 }
74233 return err;
74234 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74235 newsock->type = sock->type;
74236 newsock->ops = sock->ops;
74237
74238 + if (gr_handle_sock_server_other(sock->sk)) {
74239 + err = -EPERM;
74240 + sock_release(newsock);
74241 + goto out_put;
74242 + }
74243 +
74244 + err = gr_search_accept(sock);
74245 + if (err) {
74246 + sock_release(newsock);
74247 + goto out_put;
74248 + }
74249 +
74250 /*
74251 * We don't need try_module_get here, as the listening socket (sock)
74252 * has the protocol module (sock->ops->owner) held.
74253 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74254 fd_install(newfd, newfile);
74255 err = newfd;
74256
74257 + gr_attach_curr_ip(newsock->sk);
74258 +
74259 out_put:
74260 fput_light(sock->file, fput_needed);
74261 out:
74262 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74263 int, addrlen)
74264 {
74265 struct socket *sock;
74266 + struct sockaddr *sck;
74267 struct sockaddr_storage address;
74268 int err, fput_needed;
74269
74270 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74271 if (err < 0)
74272 goto out_put;
74273
74274 + sck = (struct sockaddr *)&address;
74275 +
74276 + if (gr_handle_sock_client(sck)) {
74277 + err = -EACCES;
74278 + goto out_put;
74279 + }
74280 +
74281 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74282 + if (err)
74283 + goto out_put;
74284 +
74285 err =
74286 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74287 if (err)
74288 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
74289 unsigned char *ctl_buf = ctl;
74290 int err, ctl_len, iov_size, total_len;
74291
74292 + pax_track_stack();
74293 +
74294 err = -EFAULT;
74295 if (MSG_CMSG_COMPAT & flags) {
74296 if (get_compat_msghdr(msg_sys, msg_compat))
74297 @@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *
74298 * checking falls down on this.
74299 */
74300 if (copy_from_user(ctl_buf,
74301 - (void __user __force *)msg_sys->msg_control,
74302 + (void __force_user *)msg_sys->msg_control,
74303 ctl_len))
74304 goto out_freectl;
74305 msg_sys->msg_control = ctl_buf;
74306 @@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *
74307 * kernel msghdr to use the kernel address space)
74308 */
74309
74310 - uaddr = (__force void __user *)msg_sys->msg_name;
74311 + uaddr = (void __force_user *)msg_sys->msg_name;
74312 uaddr_len = COMPAT_NAMELEN(msg);
74313 if (MSG_CMSG_COMPAT & flags) {
74314 err = verify_compat_iovec(msg_sys, iov,
74315 @@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net
74316 }
74317
74318 ifr = compat_alloc_user_space(buf_size);
74319 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
74320 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
74321
74322 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
74323 return -EFAULT;
74324 @@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net
74325 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
74326
74327 if (copy_in_user(rxnfc, compat_rxnfc,
74328 - (void *)(&rxnfc->fs.m_ext + 1) -
74329 - (void *)rxnfc) ||
74330 + (void __user *)(&rxnfc->fs.m_ext + 1) -
74331 + (void __user *)rxnfc) ||
74332 copy_in_user(&rxnfc->fs.ring_cookie,
74333 &compat_rxnfc->fs.ring_cookie,
74334 - (void *)(&rxnfc->fs.location + 1) -
74335 - (void *)&rxnfc->fs.ring_cookie) ||
74336 + (void __user *)(&rxnfc->fs.location + 1) -
74337 + (void __user *)&rxnfc->fs.ring_cookie) ||
74338 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
74339 sizeof(rxnfc->rule_cnt)))
74340 return -EFAULT;
74341 @@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net
74342
74343 if (convert_out) {
74344 if (copy_in_user(compat_rxnfc, rxnfc,
74345 - (const void *)(&rxnfc->fs.m_ext + 1) -
74346 - (const void *)rxnfc) ||
74347 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
74348 + (const void __user *)rxnfc) ||
74349 copy_in_user(&compat_rxnfc->fs.ring_cookie,
74350 &rxnfc->fs.ring_cookie,
74351 - (const void *)(&rxnfc->fs.location + 1) -
74352 - (const void *)&rxnfc->fs.ring_cookie) ||
74353 + (const void __user *)(&rxnfc->fs.location + 1) -
74354 + (const void __user *)&rxnfc->fs.ring_cookie) ||
74355 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
74356 sizeof(rxnfc->rule_cnt)))
74357 return -EFAULT;
74358 @@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, u
74359 old_fs = get_fs();
74360 set_fs(KERNEL_DS);
74361 err = dev_ioctl(net, cmd,
74362 - (struct ifreq __user __force *) &kifr);
74363 + (struct ifreq __force_user *) &kifr);
74364 set_fs(old_fs);
74365
74366 return err;
74367 @@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net
74368
74369 old_fs = get_fs();
74370 set_fs(KERNEL_DS);
74371 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
74372 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
74373 set_fs(old_fs);
74374
74375 if (cmd == SIOCGIFMAP && !err) {
74376 @@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net
74377 ret |= __get_user(rtdev, &(ur4->rt_dev));
74378 if (rtdev) {
74379 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
74380 - r4.rt_dev = (char __user __force *)devname;
74381 + r4.rt_dev = (char __force_user *)devname;
74382 devname[15] = 0;
74383 } else
74384 r4.rt_dev = NULL;
74385 @@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *soc
74386 int __user *uoptlen;
74387 int err;
74388
74389 - uoptval = (char __user __force *) optval;
74390 - uoptlen = (int __user __force *) optlen;
74391 + uoptval = (char __force_user *) optval;
74392 + uoptlen = (int __force_user *) optlen;
74393
74394 set_fs(KERNEL_DS);
74395 if (level == SOL_SOCKET)
74396 @@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *soc
74397 char __user *uoptval;
74398 int err;
74399
74400 - uoptval = (char __user __force *) optval;
74401 + uoptval = (char __force_user *) optval;
74402
74403 set_fs(KERNEL_DS);
74404 if (level == SOL_SOCKET)
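
Note on the net/socket.c hunks above: besides exporting sock_mnt and rejecting negative protocol numbers, the patch gates socket(), bind(), listen(), accept4() and connect() with gr_* policy checks that fail early with -EACCES or -EPERM and jump to the existing cleanup label (or a newly added error: label) before the LSM hook runs. A minimal user-space sketch of that gate-then-cleanup shape; policy_allows_socket() is a hypothetical stand-in, not the grsecurity API:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for gr_search_socket()/gr_handle_sock_all(). */
static int policy_allows_socket(int family, int type, int protocol)
{
    (void)family; (void)type; (void)protocol;
    return 1; /* 1 = allowed, 0 = denied */
}

/* Sketch of the syscall shape used in the patch: deny early, share cleanup. */
static int sys_socket_sketch(int family, int type, int protocol)
{
    int retval;

    if (protocol < 0)                 /* the added sanity check */
        return -EINVAL;

    if (!policy_allows_socket(family, type, protocol)) {
        retval = -EACCES;             /* policy veto before any allocation */
        goto out;
    }

    retval = 42;                      /* pretend sock_create() + fd setup succeeded */
out:
    return retval;
}

int main(void)
{
    printf("socket sketch returned %d\n", sys_socket_sketch(2, 1, 0));
    return 0;
}
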
74405 diff -urNp linux-3.0.7/net/sunrpc/sched.c linux-3.0.7/net/sunrpc/sched.c
74406 --- linux-3.0.7/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
74407 +++ linux-3.0.7/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
74408 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
74409 #ifdef RPC_DEBUG
74410 static void rpc_task_set_debuginfo(struct rpc_task *task)
74411 {
74412 - static atomic_t rpc_pid;
74413 + static atomic_unchecked_t rpc_pid;
74414
74415 - task->tk_pid = atomic_inc_return(&rpc_pid);
74416 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
74417 }
74418 #else
74419 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
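
The sched.c hunk above converts the debug-only rpc_pid counter to atomic_unchecked_t, the convention used throughout this patch: counters whose wraparound is harmless are moved to the *_unchecked variants so that PaX's overflow-checked atomics apply only to genuine reference counts. A user-space sketch of the idea using C11 atomics; atomic_unchecked_t itself is PaX-specific and is not modeled here:

#include <stdatomic.h>
#include <stdio.h>

/* Rough analogue of the distinction drawn in the patch: a counter whose
 * wraparound is harmless gets its own type, so readers (and, in PaX, the
 * overflow-detection instrumentation) can tell it apart from reference
 * counts that must never wrap. */
typedef struct { atomic_int counter; } wrap_ok_counter_t;

static int wrap_ok_inc_return(wrap_ok_counter_t *c)
{
    /* Overflow here merely wraps; nothing is freed or reused as a result. */
    return atomic_fetch_add(&c->counter, 1) + 1;
}

int main(void)
{
    static wrap_ok_counter_t rpc_pid_sketch; /* mirrors the static rpc_pid */
    printf("task id %d\n", wrap_ok_inc_return(&rpc_pid_sketch));
    printf("task id %d\n", wrap_ok_inc_return(&rpc_pid_sketch));
    return 0;
}
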
74420 diff -urNp linux-3.0.7/net/sunrpc/svcsock.c linux-3.0.7/net/sunrpc/svcsock.c
74421 --- linux-3.0.7/net/sunrpc/svcsock.c 2011-07-21 22:17:23.000000000 -0400
74422 +++ linux-3.0.7/net/sunrpc/svcsock.c 2011-10-06 04:17:55.000000000 -0400
74423 @@ -392,7 +392,7 @@ static int svc_partial_recvfrom(struct s
74424 int buflen, unsigned int base)
74425 {
74426 size_t save_iovlen;
74427 - void __user *save_iovbase;
74428 + void *save_iovbase;
74429 unsigned int i;
74430 int ret;
74431
74432 diff -urNp linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma.c
74433 --- linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
74434 +++ linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
74435 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
74436 static unsigned int min_max_inline = 4096;
74437 static unsigned int max_max_inline = 65536;
74438
74439 -atomic_t rdma_stat_recv;
74440 -atomic_t rdma_stat_read;
74441 -atomic_t rdma_stat_write;
74442 -atomic_t rdma_stat_sq_starve;
74443 -atomic_t rdma_stat_rq_starve;
74444 -atomic_t rdma_stat_rq_poll;
74445 -atomic_t rdma_stat_rq_prod;
74446 -atomic_t rdma_stat_sq_poll;
74447 -atomic_t rdma_stat_sq_prod;
74448 +atomic_unchecked_t rdma_stat_recv;
74449 +atomic_unchecked_t rdma_stat_read;
74450 +atomic_unchecked_t rdma_stat_write;
74451 +atomic_unchecked_t rdma_stat_sq_starve;
74452 +atomic_unchecked_t rdma_stat_rq_starve;
74453 +atomic_unchecked_t rdma_stat_rq_poll;
74454 +atomic_unchecked_t rdma_stat_rq_prod;
74455 +atomic_unchecked_t rdma_stat_sq_poll;
74456 +atomic_unchecked_t rdma_stat_sq_prod;
74457
74458 /* Temporary NFS request map and context caches */
74459 struct kmem_cache *svc_rdma_map_cachep;
74460 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
74461 len -= *ppos;
74462 if (len > *lenp)
74463 len = *lenp;
74464 - if (len && copy_to_user(buffer, str_buf, len))
74465 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
74466 return -EFAULT;
74467 *lenp = len;
74468 *ppos += len;
74469 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
74470 {
74471 .procname = "rdma_stat_read",
74472 .data = &rdma_stat_read,
74473 - .maxlen = sizeof(atomic_t),
74474 + .maxlen = sizeof(atomic_unchecked_t),
74475 .mode = 0644,
74476 .proc_handler = read_reset_stat,
74477 },
74478 {
74479 .procname = "rdma_stat_recv",
74480 .data = &rdma_stat_recv,
74481 - .maxlen = sizeof(atomic_t),
74482 + .maxlen = sizeof(atomic_unchecked_t),
74483 .mode = 0644,
74484 .proc_handler = read_reset_stat,
74485 },
74486 {
74487 .procname = "rdma_stat_write",
74488 .data = &rdma_stat_write,
74489 - .maxlen = sizeof(atomic_t),
74490 + .maxlen = sizeof(atomic_unchecked_t),
74491 .mode = 0644,
74492 .proc_handler = read_reset_stat,
74493 },
74494 {
74495 .procname = "rdma_stat_sq_starve",
74496 .data = &rdma_stat_sq_starve,
74497 - .maxlen = sizeof(atomic_t),
74498 + .maxlen = sizeof(atomic_unchecked_t),
74499 .mode = 0644,
74500 .proc_handler = read_reset_stat,
74501 },
74502 {
74503 .procname = "rdma_stat_rq_starve",
74504 .data = &rdma_stat_rq_starve,
74505 - .maxlen = sizeof(atomic_t),
74506 + .maxlen = sizeof(atomic_unchecked_t),
74507 .mode = 0644,
74508 .proc_handler = read_reset_stat,
74509 },
74510 {
74511 .procname = "rdma_stat_rq_poll",
74512 .data = &rdma_stat_rq_poll,
74513 - .maxlen = sizeof(atomic_t),
74514 + .maxlen = sizeof(atomic_unchecked_t),
74515 .mode = 0644,
74516 .proc_handler = read_reset_stat,
74517 },
74518 {
74519 .procname = "rdma_stat_rq_prod",
74520 .data = &rdma_stat_rq_prod,
74521 - .maxlen = sizeof(atomic_t),
74522 + .maxlen = sizeof(atomic_unchecked_t),
74523 .mode = 0644,
74524 .proc_handler = read_reset_stat,
74525 },
74526 {
74527 .procname = "rdma_stat_sq_poll",
74528 .data = &rdma_stat_sq_poll,
74529 - .maxlen = sizeof(atomic_t),
74530 + .maxlen = sizeof(atomic_unchecked_t),
74531 .mode = 0644,
74532 .proc_handler = read_reset_stat,
74533 },
74534 {
74535 .procname = "rdma_stat_sq_prod",
74536 .data = &rdma_stat_sq_prod,
74537 - .maxlen = sizeof(atomic_t),
74538 + .maxlen = sizeof(atomic_unchecked_t),
74539 .mode = 0644,
74540 .proc_handler = read_reset_stat,
74541 },
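
In svc_rdma.c the rdma_stat_* counters move to atomic_unchecked_t (with the sysctl .maxlen fields updated to match), and read_reset_stat() gains a bound: the requested length is rejected if it exceeds the on-stack str_buf before copy_to_user() runs. A simplified, self-contained sketch of that clamp; the names and sizes here are illustrative only:

#include <stdio.h>
#include <string.h>

/* Sketch of the added check in read_reset_stat(): never let a caller-derived
 * length exceed the backing buffer, regardless of what other state says. */
static long copy_stat_sketch(char *dst, size_t requested, const char *str_buf,
                             size_t str_buf_size)
{
    if (requested > str_buf_size)   /* the new guard: refuse oversized requests */
        return -1;                  /* the kernel code returns -EFAULT here */
    if (requested)
        memcpy(dst, str_buf, requested);
    return (long)requested;
}

int main(void)
{
    char str_buf[32] = "17\n";
    char out[64];

    printf("ok: %ld\n", copy_stat_sketch(out, strlen(str_buf), str_buf, sizeof str_buf));
    printf("too big: %ld\n", copy_stat_sketch(out, 1000, str_buf, sizeof str_buf));
    return 0;
}
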
74542 diff -urNp linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
74543 --- linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
74544 +++ linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
74545 @@ -499,7 +499,7 @@ next_sge:
74546 svc_rdma_put_context(ctxt, 0);
74547 goto out;
74548 }
74549 - atomic_inc(&rdma_stat_read);
74550 + atomic_inc_unchecked(&rdma_stat_read);
74551
74552 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
74553 chl_map->ch[ch_no].count -= read_wr.num_sge;
74554 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74555 dto_q);
74556 list_del_init(&ctxt->dto_q);
74557 } else {
74558 - atomic_inc(&rdma_stat_rq_starve);
74559 + atomic_inc_unchecked(&rdma_stat_rq_starve);
74560 clear_bit(XPT_DATA, &xprt->xpt_flags);
74561 ctxt = NULL;
74562 }
74563 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74564 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
74565 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
74566 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
74567 - atomic_inc(&rdma_stat_recv);
74568 + atomic_inc_unchecked(&rdma_stat_recv);
74569
74570 /* Build up the XDR from the receive buffers. */
74571 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
74572 diff -urNp linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_sendto.c
74573 --- linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
74574 +++ linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
74575 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
74576 write_wr.wr.rdma.remote_addr = to;
74577
74578 /* Post It */
74579 - atomic_inc(&rdma_stat_write);
74580 + atomic_inc_unchecked(&rdma_stat_write);
74581 if (svc_rdma_send(xprt, &write_wr))
74582 goto err;
74583 return 0;
74584 diff -urNp linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_transport.c
74585 --- linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
74586 +++ linux-3.0.7/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
74587 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
74588 return;
74589
74590 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
74591 - atomic_inc(&rdma_stat_rq_poll);
74592 + atomic_inc_unchecked(&rdma_stat_rq_poll);
74593
74594 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
74595 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
74596 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
74597 }
74598
74599 if (ctxt)
74600 - atomic_inc(&rdma_stat_rq_prod);
74601 + atomic_inc_unchecked(&rdma_stat_rq_prod);
74602
74603 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
74604 /*
74605 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
74606 return;
74607
74608 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
74609 - atomic_inc(&rdma_stat_sq_poll);
74610 + atomic_inc_unchecked(&rdma_stat_sq_poll);
74611 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
74612 if (wc.status != IB_WC_SUCCESS)
74613 /* Close the transport */
74614 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
74615 }
74616
74617 if (ctxt)
74618 - atomic_inc(&rdma_stat_sq_prod);
74619 + atomic_inc_unchecked(&rdma_stat_sq_prod);
74620 }
74621
74622 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
74623 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
74624 spin_lock_bh(&xprt->sc_lock);
74625 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
74626 spin_unlock_bh(&xprt->sc_lock);
74627 - atomic_inc(&rdma_stat_sq_starve);
74628 + atomic_inc_unchecked(&rdma_stat_sq_starve);
74629
74630 /* See if we can opportunistically reap SQ WR to make room */
74631 sq_cq_reap(xprt);
74632 diff -urNp linux-3.0.7/net/sysctl_net.c linux-3.0.7/net/sysctl_net.c
74633 --- linux-3.0.7/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
74634 +++ linux-3.0.7/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
74635 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
74636 struct ctl_table *table)
74637 {
74638 /* Allow network administrator to have same access as root. */
74639 - if (capable(CAP_NET_ADMIN)) {
74640 + if (capable_nolog(CAP_NET_ADMIN)) {
74641 int mode = (table->mode >> 6) & 7;
74642 return (mode << 6) | (mode << 3) | mode;
74643 }
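
The sysctl_net.c hunk only swaps capable() for grsecurity's capable_nolog(), so this routine permission probe does not generate audit noise; the surrounding logic, which grants a CAP_NET_ADMIN holder the table owner's permission bits in all three positions, is unchanged. The bit arithmetic on its own, as a runnable example (input modes are arbitrary):

#include <stdio.h>

/* Replicates the expansion in net_ctl_permissions(): take the owner bits of
 * table->mode and mirror them into the group and other positions, so a
 * CAP_NET_ADMIN holder gets owner-equivalent access. */
static int expand_owner_mode(int table_mode)
{
    int mode = (table_mode >> 6) & 7;          /* owner rwx bits */
    return (mode << 6) | (mode << 3) | mode;   /* same bits for owner/group/other */
}

int main(void)
{
    printf("0644 -> %o\n", expand_owner_mode(0644)); /* prints 666 */
    printf("0600 -> %o\n", expand_owner_mode(0600)); /* prints 666 */
    return 0;
}
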
74644 diff -urNp linux-3.0.7/net/tipc/link.c linux-3.0.7/net/tipc/link.c
74645 --- linux-3.0.7/net/tipc/link.c 2011-07-21 22:17:23.000000000 -0400
74646 +++ linux-3.0.7/net/tipc/link.c 2011-10-06 04:17:55.000000000 -0400
74647 @@ -1170,7 +1170,7 @@ static int link_send_sections_long(struc
74648 struct tipc_msg fragm_hdr;
74649 struct sk_buff *buf, *buf_chain, *prev;
74650 u32 fragm_crs, fragm_rest, hsz, sect_rest;
74651 - const unchar *sect_crs;
74652 + const unchar __user *sect_crs;
74653 int curr_sect;
74654 u32 fragm_no;
74655
74656 @@ -1214,7 +1214,7 @@ again:
74657
74658 if (!sect_rest) {
74659 sect_rest = msg_sect[++curr_sect].iov_len;
74660 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
74661 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
74662 }
74663
74664 if (sect_rest < fragm_rest)
74665 @@ -1233,7 +1233,7 @@ error:
74666 }
74667 } else
74668 skb_copy_to_linear_data_offset(buf, fragm_crs,
74669 - sect_crs, sz);
74670 + (const void __force_kernel *)sect_crs, sz);
74671 sect_crs += sz;
74672 sect_rest -= sz;
74673 fragm_crs += sz;
74674 diff -urNp linux-3.0.7/net/tipc/msg.c linux-3.0.7/net/tipc/msg.c
74675 --- linux-3.0.7/net/tipc/msg.c 2011-07-21 22:17:23.000000000 -0400
74676 +++ linux-3.0.7/net/tipc/msg.c 2011-10-06 04:17:55.000000000 -0400
74677 @@ -101,7 +101,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
74678 msg_sect[cnt].iov_len);
74679 else
74680 skb_copy_to_linear_data_offset(*buf, pos,
74681 - msg_sect[cnt].iov_base,
74682 + (const void __force_kernel *)msg_sect[cnt].iov_base,
74683 msg_sect[cnt].iov_len);
74684 pos += msg_sect[cnt].iov_len;
74685 }
74686 diff -urNp linux-3.0.7/net/tipc/subscr.c linux-3.0.7/net/tipc/subscr.c
74687 --- linux-3.0.7/net/tipc/subscr.c 2011-07-21 22:17:23.000000000 -0400
74688 +++ linux-3.0.7/net/tipc/subscr.c 2011-10-06 04:17:55.000000000 -0400
74689 @@ -101,7 +101,7 @@ static void subscr_send_event(struct sub
74690 {
74691 struct iovec msg_sect;
74692
74693 - msg_sect.iov_base = (void *)&sub->evt;
74694 + msg_sect.iov_base = (void __force_user *)&sub->evt;
74695 msg_sect.iov_len = sizeof(struct tipc_event);
74696
74697 sub->evt.event = htohl(event, sub->swap);
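
Many of the hunks in this region (net/socket.c, the compat ioctl paths, and the TIPC files) only adjust sparse annotations: `__user __force` becomes the patch's combined `__force_user`, and `__force_kernel` marks places where a pointer annotated __user is known to reference kernel memory (for example iovecs built in-kernel and used under set_fs(KERNEL_DS)). These macros only have an effect under sparse's __CHECKER__. A stripped-down sketch of how such annotations are defined and used, simplified from the kernel's compiler.h rather than the exact grsecurity definitions:

#include <stdio.h>
#include <string.h>

/* Under sparse (__CHECKER__) the kernel defines __user as an address-space
 * attribute; for a normal compile it expands to nothing.  The __force
 * variants tell sparse that a cross-address-space cast is intentional. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* A buffer that is conceptually in userspace. */
static char __user *fake_user_buf;

static void copy_from_user_sketch(void *dst, const void __user *src, size_t n)
{
    /* The __force cast is the documented "yes, I mean it" escape hatch. */
    memcpy(dst, (const void __force *)src, n);
}

int main(void)
{
    static char user_backing[16] = "hello";
    char kbuf[16];

    fake_user_buf = (char __user __force *)user_backing;
    copy_from_user_sketch(kbuf, fake_user_buf, sizeof kbuf);
    printf("%s\n", kbuf);
    return 0;
}
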
74698 diff -urNp linux-3.0.7/net/unix/af_unix.c linux-3.0.7/net/unix/af_unix.c
74699 --- linux-3.0.7/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
74700 +++ linux-3.0.7/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
74701 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
74702 err = -ECONNREFUSED;
74703 if (!S_ISSOCK(inode->i_mode))
74704 goto put_fail;
74705 +
74706 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
74707 + err = -EACCES;
74708 + goto put_fail;
74709 + }
74710 +
74711 u = unix_find_socket_byinode(inode);
74712 if (!u)
74713 goto put_fail;
74714 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
74715 if (u) {
74716 struct dentry *dentry;
74717 dentry = unix_sk(u)->dentry;
74718 +
74719 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
74720 + err = -EPERM;
74721 + sock_put(u);
74722 + goto fail;
74723 + }
74724 +
74725 if (dentry)
74726 touch_atime(unix_sk(u)->mnt, dentry);
74727 } else
74728 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
74729 err = security_path_mknod(&nd.path, dentry, mode, 0);
74730 if (err)
74731 goto out_mknod_drop_write;
74732 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
74733 + err = -EACCES;
74734 + goto out_mknod_drop_write;
74735 + }
74736 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
74737 out_mknod_drop_write:
74738 mnt_drop_write(nd.path.mnt);
74739 if (err)
74740 goto out_mknod_dput;
74741 +
74742 + gr_handle_create(dentry, nd.path.mnt);
74743 +
74744 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
74745 dput(nd.path.dentry);
74746 nd.path.dentry = dentry;
74747 diff -urNp linux-3.0.7/net/wireless/core.h linux-3.0.7/net/wireless/core.h
74748 --- linux-3.0.7/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
74749 +++ linux-3.0.7/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
74750 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
74751 struct mutex mtx;
74752
74753 /* rfkill support */
74754 - struct rfkill_ops rfkill_ops;
74755 + rfkill_ops_no_const rfkill_ops;
74756 struct rfkill *rfkill;
74757 struct work_struct rfkill_sync;
74758
74759 diff -urNp linux-3.0.7/net/wireless/wext-core.c linux-3.0.7/net/wireless/wext-core.c
74760 --- linux-3.0.7/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
74761 +++ linux-3.0.7/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
74762 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
74763 */
74764
74765 /* Support for very large requests */
74766 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
74767 - (user_length > descr->max_tokens)) {
74768 + if (user_length > descr->max_tokens) {
74769 /* Allow userspace to GET more than max so
74770 * we can support any size GET requests.
74771 * There is still a limit : -ENOMEM.
74772 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
74773 }
74774 }
74775
74776 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
74777 - /*
74778 - * If this is a GET, but not NOMAX, it means that the extra
74779 - * data is not bounded by userspace, but by max_tokens. Thus
74780 - * set the length to max_tokens. This matches the extra data
74781 - * allocation.
74782 - * The driver should fill it with the number of tokens it
74783 - * provided, and it may check iwp->length rather than having
74784 - * knowledge of max_tokens. If the driver doesn't change the
74785 - * iwp->length, this ioctl just copies back max_token tokens
74786 - * filled with zeroes. Hopefully the driver isn't claiming
74787 - * them to be valid data.
74788 - */
74789 - iwp->length = descr->max_tokens;
74790 - }
74791 -
74792 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74793
74794 iwp->length += essid_compat;
74795 diff -urNp linux-3.0.7/net/xfrm/xfrm_policy.c linux-3.0.7/net/xfrm/xfrm_policy.c
74796 --- linux-3.0.7/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
74797 +++ linux-3.0.7/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
74798 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
74799 {
74800 policy->walk.dead = 1;
74801
74802 - atomic_inc(&policy->genid);
74803 + atomic_inc_unchecked(&policy->genid);
74804
74805 if (del_timer(&policy->timer))
74806 xfrm_pol_put(policy);
74807 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
74808 hlist_add_head(&policy->bydst, chain);
74809 xfrm_pol_hold(policy);
74810 net->xfrm.policy_count[dir]++;
74811 - atomic_inc(&flow_cache_genid);
74812 + atomic_inc_unchecked(&flow_cache_genid);
74813 if (delpol)
74814 __xfrm_policy_unlink(delpol, dir);
74815 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74816 @@ -1528,7 +1528,7 @@ free_dst:
74817 goto out;
74818 }
74819
74820 -static int inline
74821 +static inline int
74822 xfrm_dst_alloc_copy(void **target, const void *src, int size)
74823 {
74824 if (!*target) {
74825 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
74826 return 0;
74827 }
74828
74829 -static int inline
74830 +static inline int
74831 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
74832 {
74833 #ifdef CONFIG_XFRM_SUB_POLICY
74834 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
74835 #endif
74836 }
74837
74838 -static int inline
74839 +static inline int
74840 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
74841 {
74842 #ifdef CONFIG_XFRM_SUB_POLICY
74843 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
74844
74845 xdst->num_pols = num_pols;
74846 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
74847 - xdst->policy_genid = atomic_read(&pols[0]->genid);
74848 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
74849
74850 return xdst;
74851 }
74852 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
74853 if (xdst->xfrm_genid != dst->xfrm->genid)
74854 return 0;
74855 if (xdst->num_pols > 0 &&
74856 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
74857 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
74858 return 0;
74859
74860 mtu = dst_mtu(dst->child);
74861 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
74862 sizeof(pol->xfrm_vec[i].saddr));
74863 pol->xfrm_vec[i].encap_family = mp->new_family;
74864 /* flush bundles */
74865 - atomic_inc(&pol->genid);
74866 + atomic_inc_unchecked(&pol->genid);
74867 }
74868 }
74869
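
Besides moving the policy/flow-cache generation counters to the unchecked atomics, the xfrm_policy.c hunks reorder declaration specifiers: `static int inline` is accepted by the compiler, but the conventional kernel form puts inline before the return type. A trivial example of the preferred ordering (names are illustrative):

#include <stdio.h>

/* Preferred ordering: storage class, then inline, then the return type.
 * "static int inline" means the same thing to the compiler but reads badly
 * and is not the form used elsewhere in the tree. */
static inline int xfrm_sketch_add(int a, int b)
{
    return a + b;
}

int main(void)
{
    printf("%d\n", xfrm_sketch_add(2, 3));
    return 0;
}
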
74870 diff -urNp linux-3.0.7/net/xfrm/xfrm_user.c linux-3.0.7/net/xfrm/xfrm_user.c
74871 --- linux-3.0.7/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
74872 +++ linux-3.0.7/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
74873 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
74874 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74875 int i;
74876
74877 + pax_track_stack();
74878 +
74879 if (xp->xfrm_nr == 0)
74880 return 0;
74881
74882 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
74883 int err;
74884 int n = 0;
74885
74886 + pax_track_stack();
74887 +
74888 if (attrs[XFRMA_MIGRATE] == NULL)
74889 return -EINVAL;
74890
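
The xfrm_user.c hunks only insert pax_track_stack(), a PaX helper (no portable equivalent exists) placed in functions with large locals such as the struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH] array in copy_to_user_tmpl. The sketch below only illustrates the kind of on-stack footprint that attracts this instrumentation; the struct size and depth are made up, not the real xfrm values:

#include <stdio.h>

/* Illustrative stand-in for struct xfrm_user_tmpl; the real layout differs. */
struct tmpl_sketch { unsigned char bytes[64]; };
#define DEPTH_SKETCH 6

int main(void)
{
    /* A fixed-size array of templates living entirely on the stack: the kind
     * of local that motivates kernel-stack usage tracking on an 8 KB stack. */
    struct tmpl_sketch vec[DEPTH_SKETCH] = { { {0} } };

    printf("local template array uses %zu bytes of stack\n", sizeof vec);
    return (int)vec[0].bytes[0];
}
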
74891 diff -urNp linux-3.0.7/scripts/basic/fixdep.c linux-3.0.7/scripts/basic/fixdep.c
74892 --- linux-3.0.7/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
74893 +++ linux-3.0.7/scripts/basic/fixdep.c 2011-10-06 04:17:55.000000000 -0400
74894 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *
74895 /*
74896 * Lookup a value in the configuration string.
74897 */
74898 -static int is_defined_config(const char *name, int len, unsigned int hash)
74899 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
74900 {
74901 struct item *aux;
74902
74903 @@ -211,10 +211,10 @@ static void clear_config(void)
74904 /*
74905 * Record the use of a CONFIG_* word.
74906 */
74907 -static void use_config(const char *m, int slen)
74908 +static void use_config(const char *m, unsigned int slen)
74909 {
74910 unsigned int hash = strhash(m, slen);
74911 - int c, i;
74912 + unsigned int c, i;
74913
74914 if (is_defined_config(m, slen, hash))
74915 return;
74916 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
74917
74918 static void parse_config_file(const char *map, size_t len)
74919 {
74920 - const int *end = (const int *) (map + len);
74921 + const unsigned int *end = (const unsigned int *) (map + len);
74922 /* start at +1, so that p can never be < map */
74923 - const int *m = (const int *) map + 1;
74924 + const unsigned int *m = (const unsigned int *) map + 1;
74925 const char *p, *q;
74926
74927 for (; m < end; m++) {
74928 @@ -405,7 +405,7 @@ static void print_deps(void)
74929 static void traps(void)
74930 {
74931 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74932 - int *p = (int *)test;
74933 + unsigned int *p = (unsigned int *)test;
74934
74935 if (*p != INT_CONF) {
74936 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
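
This fixdep.c hunk (and the file2alias.c and modpost.c hunks further down) converts lengths, hash values and loop indices from int to unsigned int, so that size arithmetic and comparisons stay in one signedness. A small runnable example of the surprise such mixed comparisons can produce:

#include <stdio.h>

int main(void)
{
    int pos = -1;          /* a signed length/offset that went negative */
    size_t size = 16;

    /* In a mixed comparison the int is converted to size_t, so what reads as
     * "-1 < 16" is evaluated as "SIZE_MAX < 16" and is false.  Keeping
     * lengths and indices unsigned end to end makes such comparisons mean
     * what they say (and silences -Wsign-compare). */
    if (pos < size)
        puts("comparison behaves as written");
    else
        puts("comparison does not behave as written");   /* this branch runs */
    return 0;
}
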
74937 diff -urNp linux-3.0.7/scripts/gcc-plugin.sh linux-3.0.7/scripts/gcc-plugin.sh
74938 --- linux-3.0.7/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74939 +++ linux-3.0.7/scripts/gcc-plugin.sh 2011-09-14 09:08:05.000000000 -0400
74940 @@ -0,0 +1,2 @@
74941 +#!/bin/sh
74942 +echo "#include \"gcc-plugin.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
74943 diff -urNp linux-3.0.7/scripts/Makefile.build linux-3.0.7/scripts/Makefile.build
74944 --- linux-3.0.7/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
74945 +++ linux-3.0.7/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
74946 @@ -109,7 +109,7 @@ endif
74947 endif
74948
74949 # Do not include host rules unless needed
74950 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74951 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74952 include scripts/Makefile.host
74953 endif
74954
74955 diff -urNp linux-3.0.7/scripts/Makefile.clean linux-3.0.7/scripts/Makefile.clean
74956 --- linux-3.0.7/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
74957 +++ linux-3.0.7/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
74958 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74959 __clean-files := $(extra-y) $(always) \
74960 $(targets) $(clean-files) \
74961 $(host-progs) \
74962 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74963 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74964 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74965
74966 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
74967
74968 diff -urNp linux-3.0.7/scripts/Makefile.host linux-3.0.7/scripts/Makefile.host
74969 --- linux-3.0.7/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
74970 +++ linux-3.0.7/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
74971 @@ -31,6 +31,7 @@
74972 # Note: Shared libraries consisting of C++ files are not supported
74973
74974 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74975 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74976
74977 # C code
74978 # Executables compiled from a single .c file
74979 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74980 # Shared libaries (only .c supported)
74981 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74982 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74983 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74984 # Remove .so files from "xxx-objs"
74985 host-cobjs := $(filter-out %.so,$(host-cobjs))
74986
74987 diff -urNp linux-3.0.7/scripts/mod/file2alias.c linux-3.0.7/scripts/mod/file2alias.c
74988 --- linux-3.0.7/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
74989 +++ linux-3.0.7/scripts/mod/file2alias.c 2011-10-06 04:17:55.000000000 -0400
74990 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74991 unsigned long size, unsigned long id_size,
74992 void *symval)
74993 {
74994 - int i;
74995 + unsigned int i;
74996
74997 if (size % id_size || size < id_size) {
74998 if (cross_build != 0)
74999 @@ -102,7 +102,7 @@ static void device_id_check(const char *
75000 /* USB is special because the bcdDevice can be matched against a numeric range */
75001 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75002 static void do_usb_entry(struct usb_device_id *id,
75003 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75004 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75005 unsigned char range_lo, unsigned char range_hi,
75006 unsigned char max, struct module *mod)
75007 {
75008 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct us
75009 {
75010 unsigned int devlo, devhi;
75011 unsigned char chi, clo, max;
75012 - int ndigits;
75013 + unsigned int ndigits;
75014
75015 id->match_flags = TO_NATIVE(id->match_flags);
75016 id->idVendor = TO_NATIVE(id->idVendor);
75017 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
75018 for (i = 0; i < count; i++) {
75019 const char *id = (char *)devs[i].id;
75020 char acpi_id[sizeof(devs[0].id)];
75021 - int j;
75022 + unsigned int j;
75023
75024 buf_printf(&mod->dev_table_buf,
75025 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75026 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
75027
75028 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75029 const char *id = (char *)card->devs[j].id;
75030 - int i2, j2;
75031 + unsigned int i2, j2;
75032 int dup = 0;
75033
75034 if (!id[0])
75035 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
75036 /* add an individual alias for every device entry */
75037 if (!dup) {
75038 char acpi_id[sizeof(card->devs[0].id)];
75039 - int k;
75040 + unsigned int k;
75041
75042 buf_printf(&mod->dev_table_buf,
75043 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75044 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
75045 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75046 char *alias)
75047 {
75048 - int i, j;
75049 + unsigned int i, j;
75050
75051 sprintf(alias, "dmi*");
75052
75053 diff -urNp linux-3.0.7/scripts/mod/modpost.c linux-3.0.7/scripts/mod/modpost.c
75054 --- linux-3.0.7/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
75055 +++ linux-3.0.7/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
75056 @@ -892,6 +892,7 @@ enum mismatch {
75057 ANY_INIT_TO_ANY_EXIT,
75058 ANY_EXIT_TO_ANY_INIT,
75059 EXPORT_TO_INIT_EXIT,
75060 + DATA_TO_TEXT
75061 };
75062
75063 struct sectioncheck {
75064 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
75065 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75066 .mismatch = EXPORT_TO_INIT_EXIT,
75067 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75068 +},
75069 +/* Do not reference code from writable data */
75070 +{
75071 + .fromsec = { DATA_SECTIONS, NULL },
75072 + .tosec = { TEXT_SECTIONS, NULL },
75073 + .mismatch = DATA_TO_TEXT
75074 }
75075 };
75076
75077 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
75078 continue;
75079 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75080 continue;
75081 - if (sym->st_value == addr)
75082 - return sym;
75083 /* Find a symbol nearby - addr are maybe negative */
75084 d = sym->st_value - addr;
75085 + if (d == 0)
75086 + return sym;
75087 if (d < 0)
75088 d = addr - sym->st_value;
75089 if (d < distance) {
75090 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
75091 tosym, prl_to, prl_to, tosym);
75092 free(prl_to);
75093 break;
75094 + case DATA_TO_TEXT:
75095 +/*
75096 + fprintf(stderr,
75097 + "The variable %s references\n"
75098 + "the %s %s%s%s\n",
75099 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75100 +*/
75101 + break;
75102 }
75103 fprintf(stderr, "\n");
75104 }
75105 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
75106 static void check_sec_ref(struct module *mod, const char *modname,
75107 struct elf_info *elf)
75108 {
75109 - int i;
75110 + unsigned int i;
75111 Elf_Shdr *sechdrs = elf->sechdrs;
75112
75113 /* Walk through all sections */
75114 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
75115 va_end(ap);
75116 }
75117
75118 -void buf_write(struct buffer *buf, const char *s, int len)
75119 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75120 {
75121 if (buf->size - buf->pos < len) {
75122 buf->size += len + SZ;
75123 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
75124 if (fstat(fileno(file), &st) < 0)
75125 goto close_write;
75126
75127 - if (st.st_size != b->pos)
75128 + if (st.st_size != (off_t)b->pos)
75129 goto close_write;
75130
75131 tmp = NOFAIL(malloc(b->pos));
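
The modpost.c hunks add a DATA_TO_TEXT section-mismatch class: references from writable data sections into text are now matched (the report itself is left commented out in this version), alongside the find_elf_symbol tweak and more int-to-unsigned conversions. The pattern being flagged, in miniature: a function pointer kept in writable data versus one the linker can keep read-only. This is an illustrative user-space example, not kernel code:

#include <stdio.h>

static void handler_a(void) { puts("a"); }
static void handler_b(void) { puts("b"); }

/* Writable data that points into text: the fromsec=DATA, tosec=TEXT pattern
 * the new modpost rule matches.  Whoever can write this slot controls where
 * the call below goes. */
static void (*writable_hook)(void) = handler_a;

/* The hardened alternative: a const pointer the linker can place in a
 * read-only section (.rodata or .data.rel.ro, depending on the build). */
static void (* const ro_hook)(void) = handler_b;

int main(void)
{
    writable_hook();   /* could have been redirected at runtime */
    ro_hook();         /* fixed once the program is loaded */
    return 0;
}
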
75132 diff -urNp linux-3.0.7/scripts/mod/modpost.h linux-3.0.7/scripts/mod/modpost.h
75133 --- linux-3.0.7/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
75134 +++ linux-3.0.7/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
75135 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
75136
75137 struct buffer {
75138 char *p;
75139 - int pos;
75140 - int size;
75141 + unsigned int pos;
75142 + unsigned int size;
75143 };
75144
75145 void __attribute__((format(printf, 2, 3)))
75146 buf_printf(struct buffer *buf, const char *fmt, ...);
75147
75148 void
75149 -buf_write(struct buffer *buf, const char *s, int len);
75150 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75151
75152 struct module {
75153 struct module *next;
75154 diff -urNp linux-3.0.7/scripts/mod/sumversion.c linux-3.0.7/scripts/mod/sumversion.c
75155 --- linux-3.0.7/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
75156 +++ linux-3.0.7/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
75157 @@ -470,7 +470,7 @@ static void write_version(const char *fi
75158 goto out;
75159 }
75160
75161 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75162 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75163 warn("writing sum in %s failed: %s\n",
75164 filename, strerror(errno));
75165 goto out;
75166 diff -urNp linux-3.0.7/scripts/pnmtologo.c linux-3.0.7/scripts/pnmtologo.c
75167 --- linux-3.0.7/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
75168 +++ linux-3.0.7/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
75169 @@ -237,14 +237,14 @@ static void write_header(void)
75170 fprintf(out, " * Linux logo %s\n", logoname);
75171 fputs(" */\n\n", out);
75172 fputs("#include <linux/linux_logo.h>\n\n", out);
75173 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75174 + fprintf(out, "static unsigned char %s_data[] = {\n",
75175 logoname);
75176 }
75177
75178 static void write_footer(void)
75179 {
75180 fputs("\n};\n\n", out);
75181 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75182 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75183 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75184 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75185 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75186 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75187 fputs("\n};\n\n", out);
75188
75189 /* write logo clut */
75190 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75191 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75192 logoname);
75193 write_hex_cnt = 0;
75194 for (i = 0; i < logo_clutsize; i++) {
75195 diff -urNp linux-3.0.7/security/apparmor/lsm.c linux-3.0.7/security/apparmor/lsm.c
75196 --- linux-3.0.7/security/apparmor/lsm.c 2011-09-02 18:11:21.000000000 -0400
75197 +++ linux-3.0.7/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
75198 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
75199 return error;
75200 }
75201
75202 -static struct security_operations apparmor_ops = {
75203 +static struct security_operations apparmor_ops __read_only = {
75204 .name = "apparmor",
75205
75206 .ptrace_access_check = apparmor_ptrace_access_check,
75207 diff -urNp linux-3.0.7/security/commoncap.c linux-3.0.7/security/commoncap.c
75208 --- linux-3.0.7/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
75209 +++ linux-3.0.7/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
75210 @@ -28,6 +28,7 @@
75211 #include <linux/prctl.h>
75212 #include <linux/securebits.h>
75213 #include <linux/user_namespace.h>
75214 +#include <net/sock.h>
75215
75216 /*
75217 * If a non-root user executes a setuid-root binary in
75218 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
75219
75220 int cap_netlink_recv(struct sk_buff *skb, int cap)
75221 {
75222 - if (!cap_raised(current_cap(), cap))
75223 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
75224 return -EPERM;
75225 return 0;
75226 }
75227 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
75228 {
75229 const struct cred *cred = current_cred();
75230
75231 + if (gr_acl_enable_at_secure())
75232 + return 1;
75233 +
75234 if (cred->uid != 0) {
75235 if (bprm->cap_effective)
75236 return 1;
75237 diff -urNp linux-3.0.7/security/integrity/ima/ima_api.c linux-3.0.7/security/integrity/ima/ima_api.c
75238 --- linux-3.0.7/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
75239 +++ linux-3.0.7/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
75240 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
75241 int result;
75242
75243 /* can overflow, only indicator */
75244 - atomic_long_inc(&ima_htable.violations);
75245 + atomic_long_inc_unchecked(&ima_htable.violations);
75246
75247 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
75248 if (!entry) {
75249 diff -urNp linux-3.0.7/security/integrity/ima/ima_fs.c linux-3.0.7/security/integrity/ima/ima_fs.c
75250 --- linux-3.0.7/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
75251 +++ linux-3.0.7/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
75252 @@ -28,12 +28,12 @@
75253 static int valid_policy = 1;
75254 #define TMPBUFLEN 12
75255 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
75256 - loff_t *ppos, atomic_long_t *val)
75257 + loff_t *ppos, atomic_long_unchecked_t *val)
75258 {
75259 char tmpbuf[TMPBUFLEN];
75260 ssize_t len;
75261
75262 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
75263 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
75264 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
75265 }
75266
75267 diff -urNp linux-3.0.7/security/integrity/ima/ima.h linux-3.0.7/security/integrity/ima/ima.h
75268 --- linux-3.0.7/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
75269 +++ linux-3.0.7/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
75270 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
75271 extern spinlock_t ima_queue_lock;
75272
75273 struct ima_h_table {
75274 - atomic_long_t len; /* number of stored measurements in the list */
75275 - atomic_long_t violations;
75276 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
75277 + atomic_long_unchecked_t violations;
75278 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
75279 };
75280 extern struct ima_h_table ima_htable;
75281 diff -urNp linux-3.0.7/security/integrity/ima/ima_queue.c linux-3.0.7/security/integrity/ima/ima_queue.c
75282 --- linux-3.0.7/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
75283 +++ linux-3.0.7/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
75284 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
75285 INIT_LIST_HEAD(&qe->later);
75286 list_add_tail_rcu(&qe->later, &ima_measurements);
75287
75288 - atomic_long_inc(&ima_htable.len);
75289 + atomic_long_inc_unchecked(&ima_htable.len);
75290 key = ima_hash_key(entry->digest);
75291 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
75292 return 0;
75293 diff -urNp linux-3.0.7/security/Kconfig linux-3.0.7/security/Kconfig
75294 --- linux-3.0.7/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
75295 +++ linux-3.0.7/security/Kconfig 2011-10-06 04:19:25.000000000 -0400
75296 @@ -4,6 +4,558 @@
75297
75298 menu "Security options"
75299
75300 +source grsecurity/Kconfig
75301 +
75302 +menu "PaX"
75303 +
75304 + config ARCH_TRACK_EXEC_LIMIT
75305 + bool
75306 +
75307 + config PAX_KERNEXEC_PLUGIN
75308 + bool
75309 +
75310 + config PAX_PER_CPU_PGD
75311 + bool
75312 +
75313 + config TASK_SIZE_MAX_SHIFT
75314 + int
75315 + depends on X86_64
75316 + default 47 if !PAX_PER_CPU_PGD
75317 + default 42 if PAX_PER_CPU_PGD
75318 +
75319 + config PAX_ENABLE_PAE
75320 + bool
75321 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75322 +
75323 +config PAX
75324 + bool "Enable various PaX features"
75325 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75326 + help
75327 + This allows you to enable various PaX features. PaX adds
75328 + intrusion prevention mechanisms to the kernel that reduce
75329 + the risks posed by exploitable memory corruption bugs.
75330 +
75331 +menu "PaX Control"
75332 + depends on PAX
75333 +
75334 +config PAX_SOFTMODE
75335 + bool 'Support soft mode'
75336 + select PAX_PT_PAX_FLAGS
75337 + help
75338 + Enabling this option will allow you to run PaX in soft mode, that
75339 + is, PaX features will not be enforced by default, only on executables
75340 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
75341 + is the only way to mark executables for soft mode use.
75342 +
75343 + Soft mode can be activated by using the "pax_softmode=1" kernel command
75344 + line option on boot. Furthermore you can control various PaX features
75345 + at runtime via the entries in /proc/sys/kernel/pax.
75346 +
75347 +config PAX_EI_PAX
75348 + bool 'Use legacy ELF header marking'
75349 + help
75350 + Enabling this option will allow you to control PaX features on
75351 + a per executable basis via the 'chpax' utility available at
75352 + http://pax.grsecurity.net/. The control flags will be read from
75353 + an otherwise reserved part of the ELF header. This marking has
75354 + numerous drawbacks (no support for soft-mode, toolchain does not
75355 + know about the non-standard use of the ELF header) therefore it
75356 + has been deprecated in favour of PT_PAX_FLAGS support.
75357 +
75358 + Note that if you enable PT_PAX_FLAGS marking support as well,
75359 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
75360 +
75361 +config PAX_PT_PAX_FLAGS
75362 + bool 'Use ELF program header marking'
75363 + help
75364 + Enabling this option will allow you to control PaX features on
75365 + a per executable basis via the 'paxctl' utility available at
75366 + http://pax.grsecurity.net/. The control flags will be read from
75367 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
75368 + has the benefits of supporting both soft mode and being fully
75369 + integrated into the toolchain (the binutils patch is available
75370 + from http://pax.grsecurity.net).
75371 +
75372 + If your toolchain does not support PT_PAX_FLAGS markings,
75373 + you can create one in most cases with 'paxctl -C'.
75374 +
75375 + Note that if you enable the legacy EI_PAX marking support as well,
75376 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
75377 +
75378 +choice
75379 + prompt 'MAC system integration'
75380 + default PAX_HAVE_ACL_FLAGS
75381 + help
75382 + Mandatory Access Control systems have the option of controlling
75383 + PaX flags on a per executable basis, choose the method supported
75384 + by your particular system.
75385 +
75386 + - "none": if your MAC system does not interact with PaX,
75387 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
75388 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
75389 +
75390 + NOTE: this option is for developers/integrators only.
75391 +
75392 + config PAX_NO_ACL_FLAGS
75393 + bool 'none'
75394 +
75395 + config PAX_HAVE_ACL_FLAGS
75396 + bool 'direct'
75397 +
75398 + config PAX_HOOK_ACL_FLAGS
75399 + bool 'hook'
75400 +endchoice
75401 +
75402 +endmenu
75403 +
75404 +menu "Non-executable pages"
75405 + depends on PAX
75406 +
75407 +config PAX_NOEXEC
75408 + bool "Enforce non-executable pages"
75409 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
75410 + help
75411 + By design some architectures do not allow for protecting memory
75412 + pages against execution or even if they do, Linux does not make
75413 + use of this feature. In practice this means that if a page is
75414 + readable (such as the stack or heap) it is also executable.
75415 +
75416 + There is a well known exploit technique that makes use of this
75417 + fact and a common programming mistake where an attacker can
75418 + introduce code of his choice somewhere in the attacked program's
75419 + memory (typically the stack or the heap) and then execute it.
75420 +
75421 + If the attacked program was running with different (typically
75422 + higher) privileges than that of the attacker, then he can elevate
75423 + his own privilege level (e.g. get a root shell, write to files for
75424 + which he does not have write access to, etc).
75425 +
75426 + Enabling this option will let you choose from various features
75427 + that prevent the injection and execution of 'foreign' code in
75428 + a program.
75429 +
75430 + This will also break programs that rely on the old behaviour and
75431 + expect that dynamically allocated memory via the malloc() family
75432 + of functions is executable (which it is not). Notable examples
75433 + are the XFree86 4.x server, the java runtime and wine.
75434 +
75435 +config PAX_PAGEEXEC
75436 + bool "Paging based non-executable pages"
75437 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
75438 + select S390_SWITCH_AMODE if S390
75439 + select S390_EXEC_PROTECT if S390
75440 + select ARCH_TRACK_EXEC_LIMIT if X86_32
75441 + help
75442 + This implementation is based on the paging feature of the CPU.
75443 + On i386 without hardware non-executable bit support there is a
75444 + variable but usually low performance impact, however on Intel's
75445 + P4 core based CPUs it is very high so you should not enable this
75446 + for kernels meant to be used on such CPUs.
75447 +
75448 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
75449 + with hardware non-executable bit support there is no performance
75450 + impact, on ppc the impact is negligible.
75451 +
75452 + Note that several architectures require various emulations due to
75453 + badly designed userland ABIs, this will cause a performance impact
75454 + but will disappear as soon as userland is fixed. For example, ppc
75455 + userland MUST have been built with secure-plt by a recent toolchain.
75456 +
75457 +config PAX_SEGMEXEC
75458 + bool "Segmentation based non-executable pages"
75459 + depends on PAX_NOEXEC && X86_32
75460 + help
75461 + This implementation is based on the segmentation feature of the
75462 + CPU and has a very small performance impact, however applications
75463 + will be limited to a 1.5 GB address space instead of the normal
75464 + 3 GB.
75465 +
75466 +config PAX_EMUTRAMP
75467 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
75468 + default y if PARISC
75469 + help
75470 + There are some programs and libraries that for one reason or
75471 + another attempt to execute special small code snippets from
75472 + non-executable memory pages. Most notable examples are the
75473 + signal handler return code generated by the kernel itself and
75474 + the GCC trampolines.
75475 +
75476 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
75477 + such programs will no longer work under your kernel.
75478 +
75479 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
75480 + utilities to enable trampoline emulation for the affected programs
75481 + yet still have the protection provided by the non-executable pages.
75482 +
75483 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
75484 + your system will not even boot.
75485 +
75486 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
75487 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
75488 + for the affected files.
75489 +
75490 + NOTE: enabling this feature *may* open up a loophole in the
75491 + protection provided by non-executable pages that an attacker
75492 + could abuse. Therefore the best solution is to not have any
75493 + files on your system that would require this option. This can
75494 + be achieved by not using libc5 (which relies on the kernel
75495 + signal handler return code) and not using or rewriting programs
75496 + that make use of the nested function implementation of GCC.
75497 + Skilled users can just fix GCC itself so that it implements
75498 + nested function calls in a way that does not interfere with PaX.
75499 +
75500 +config PAX_EMUSIGRT
75501 + bool "Automatically emulate sigreturn trampolines"
75502 + depends on PAX_EMUTRAMP && PARISC
75503 + default y
75504 + help
75505 + Enabling this option will have the kernel automatically detect
75506 + and emulate signal return trampolines executing on the stack
75507 + that would otherwise lead to task termination.
75508 +
75509 + This solution is intended as a temporary one for users with
75510 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
75511 + Modula-3 runtime, etc) or executables linked to such, basically
75512 + everything that does not specify its own SA_RESTORER function in
75513 + normal executable memory like glibc 2.1+ does.
75514 +
75515 + On parisc you MUST enable this option, otherwise your system will
75516 + not even boot.
75517 +
75518 + NOTE: this feature cannot be disabled on a per executable basis
75519 + and since it *does* open up a loophole in the protection provided
75520 + by non-executable pages, the best solution is to not have any
75521 + files on your system that would require this option.
75522 +
75523 +config PAX_MPROTECT
75524 + bool "Restrict mprotect()"
75525 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
75526 + help
75527 + Enabling this option will prevent programs from
75528 + - changing the executable status of memory pages that were
75529 + not originally created as executable,
75530 + - making read-only executable pages writable again,
75531 + - creating executable pages from anonymous memory,
75532 + - making read-only-after-relocations (RELRO) data pages writable again.
75533 +
75534 + You should say Y here to complete the protection provided by
75535 + the enforcement of non-executable pages.
75536 +
75537 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75538 + this feature on a per file basis.
75539 +
75540 +config PAX_MPROTECT_COMPAT
75541 + bool "Use legacy/compat protection demoting (read help)"
75542 + depends on PAX_MPROTECT
75543 + default n
75544 + help
75545 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
75546 + by sending the proper error code to the application. For some broken
75547 + userland, this can cause problems with Python or other applications. The
75548 + current implementation however allows for applications like clamav to
75549 + detect if JIT compilation/execution is allowed and to fall back gracefully
75550 + to an interpreter-based mode if it does not. While we encourage everyone
75551 + to use the current implementation as-is and push upstream to fix broken
75552 + userland (note that the RWX logging option can assist with this), in some
75553 + environments this may not be possible. Having to disable MPROTECT
75554 + completely on certain binaries reduces the security benefit of PaX,
75555 + so this option is provided for those environments to revert to the old
75556 + behavior.
75557 +
75558 +config PAX_ELFRELOCS
75559 + bool "Allow ELF text relocations (read help)"
75560 + depends on PAX_MPROTECT
75561 + default n
75562 + help
75563 + Non-executable pages and mprotect() restrictions are effective
75564 + in preventing the introduction of new executable code into an
75565 + attacked task's address space. There remain only two venues
75566 + for this kind of attack: if the attacker can execute already
75567 + existing code in the attacked task then he can either have it
75568 + create and mmap() a file containing his code or have it mmap()
75569 + an already existing ELF library that does not have position
75570 + independent code in it and use mprotect() on it to make it
75571 + writable and copy his code there. While protecting against
75572 + the former approach is beyond PaX, the latter can be prevented
75573 + by having only PIC ELF libraries on one's system (which do not
75574 + need to relocate their code). If you are sure this is your case,
75575 + as is the case with all modern Linux distributions, then leave
75576 + this option disabled. You should say 'n' here.
75577 +
75578 +config PAX_ETEXECRELOCS
75579 + bool "Allow ELF ET_EXEC text relocations"
75580 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
75581 + select PAX_ELFRELOCS
75582 + default y
75583 + help
75584 + On some architectures there are incorrectly created applications
75585 + that require text relocations and would not work without enabling
75586 + this option. If you are an alpha, ia64 or parisc user, you should
75587 + enable this option and disable it once you have made sure that
75588 + none of your applications need it.
75589 +
75590 +config PAX_EMUPLT
75591 + bool "Automatically emulate ELF PLT"
75592 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
75593 + default y
75594 + help
75595 + Enabling this option will have the kernel automatically detect
75596 + and emulate the Procedure Linkage Table entries in ELF files.
75597 + On some architectures such entries are in writable memory, and
75598 + become non-executable leading to task termination. Therefore
75599 + it is mandatory that you enable this option on alpha, parisc,
75600 + sparc and sparc64, otherwise your system would not even boot.
75601 +
75602 + NOTE: this feature *does* open up a loophole in the protection
75603 + provided by the non-executable pages, therefore the proper
75604 + solution is to modify the toolchain to produce a PLT that does
75605 + not need to be writable.
75606 +
75607 +config PAX_DLRESOLVE
75608 + bool 'Emulate old glibc resolver stub'
75609 + depends on PAX_EMUPLT && SPARC
75610 + default n
75611 + help
75612 + This option is needed if userland has an old glibc (before 2.4)
75613 + that puts a 'save' instruction into the runtime generated resolver
75614 + stub that needs special emulation.
75615 +
75616 +config PAX_KERNEXEC
75617 + bool "Enforce non-executable kernel pages"
75618 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
75619 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
75620 + select PAX_KERNEXEC_PLUGIN if X86_64
75621 + help
75622 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
75623 + that is, enabling this option will make it harder to inject
75624 + and execute 'foreign' code in kernel memory itself.
75625 +
75626 + Note that on x86_64 kernels there is a known regression when
75627 + this feature and KVM/VMX are both enabled in the host kernel.
75628 +
75629 +config PAX_KERNEXEC_MODULE_TEXT
75630 + int "Minimum amount of memory reserved for module code"
75631 + default "4"
75632 + depends on PAX_KERNEXEC && X86_32 && MODULES
75633 + help
75634 + Due to implementation details the kernel must reserve a fixed
75635 + amount of memory for module code at compile time that cannot be
75636 + changed at runtime. Here you can specify the minimum amount
75637 + in MB that will be reserved. Due to the same implementation
75638 + details this size will always be rounded up to the next 2/4 MB
75639 + boundary (depends on PAE) so the actually available memory for
75640 + module code will usually be more than this minimum.
75641 +
75642 + The default 4 MB should be enough for most users but if you have
75643 + an excessive number of modules (e.g., most distribution configs
75644 + compile many drivers as modules) or use huge modules such as
75645 + nvidia's kernel driver, you will need to adjust this amount.
75646 + A good rule of thumb is to look at your currently loaded kernel
75647 + modules and add up their sizes.
75648 +
75649 +endmenu
75650 +
75651 +menu "Address Space Layout Randomization"
75652 + depends on PAX
75653 +
75654 +config PAX_ASLR
75655 + bool "Address Space Layout Randomization"
75656 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
75657 + help
75658 + Many if not most exploit techniques rely on knowledge of
75659 + certain addresses in the attacked program. The following options
75660 + will allow the kernel to apply a certain amount of randomization
75661 + to specific parts of the program, thereby forcing an attacker to
75662 + guess them in most cases. Any failed guess will most likely crash
75663 + the attacked program, which allows the kernel to detect such
75664 + attempts and react to them. PaX itself provides no reaction
75665 + mechanism; instead, it is strongly encouraged that you make use of
75666 + Nergal's segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or
75667 + grsecurity's (http://www.grsecurity.net/) built-in crash detection
75668 + features, or develop one yourself.
75669 +
75670 + By saying Y here you can choose to randomize the following areas:
75671 + - top of the task's kernel stack
75672 + - top of the task's userland stack
75673 + - base address for mmap() requests that do not specify one
75674 + (this includes all libraries)
75675 + - base address of the main executable
75676 +
75677 + It is strongly recommended to say Y here as address space layout
75678 + randomization has negligible impact on performance yet it provides
75679 + a very effective protection.
75680 +
75681 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75682 + this feature on a per file basis.
75683 +
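 As an illustration (not part of the patch itself), the effect of the randomization options below is easy to observe from userland: the small standalone program here prints a stack, heap, mmap() and library address plus the address of main(), and the values change between runs once ASLR is active and the binary is built as an ET_DYN/PIE executable. File and variable names are illustrative only.

 /* aslr_demo.c - observe address space layout randomization (illustrative only)
  * build: gcc -fPIE -pie -o aslr_demo aslr_demo.c
  * run it a few times and compare the printed addresses
  */
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/mman.h>

 int main(void)
 {
 	int stack_var = 0;
 	void *heap = malloc(16);
 	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

 	printf("stack: %p\n", (void *)&stack_var);
 	printf("heap : %p\n", heap);
 	printf("mmap : %p\n", map);
 	printf("main : %p\n", (void *)main);	/* main executable base (PIE) */
 	printf("libc : %p\n", (void *)memcpy);	/* shared library mapping */

 	munmap(map, 4096);
 	free(heap);
 	return 0;
 }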
75684 +config PAX_RANDKSTACK
75685 + bool "Randomize kernel stack base"
75686 + depends on X86_TSC && X86
75687 + help
75688 + By saying Y here the kernel will randomize every task's kernel
75689 + stack on every system call. This will not only force an attacker
75690 + to guess it but also prevent him from making use of possible
75691 + leaked information about it.
75692 +
75693 + Since the kernel stack is a rather scarce resource, randomization
75694 + may cause unexpected stack overflows, therefore you should very
75695 + carefully test your system. Note that once enabled in the kernel
75696 + configuration, this feature cannot be disabled on a per file basis.
75697 +
75698 +config PAX_RANDUSTACK
75699 + bool "Randomize user stack base"
75700 + depends on PAX_ASLR
75701 + help
75702 + By saying Y here the kernel will randomize every task's userland
75703 + stack. The randomization is done in two steps; the second step
75704 + may shift the top of the stack by a large amount and thereby
75705 + cause problems for programs that want to use lots of memory (more
75706 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
75707 + For this reason the second step can be controlled by 'chpax' or
75708 + 'paxctl' on a per file basis.
75709 +
75710 +config PAX_RANDMMAP
75711 + bool "Randomize mmap() base"
75712 + depends on PAX_ASLR
75713 + help
75714 + By saying Y here the kernel will use a randomized base address for
75715 + mmap() requests that do not specify one themselves. As a result
75716 + all dynamically loaded libraries will appear at random addresses
75717 + and therefore be harder to abuse by techniques in which an attacker
75718 + reuses existing library code (e.g. to spawn a
75719 + shell from an exploited program that is running at an elevated
75720 + privilege level).
75721 +
75722 + Furthermore, if a program is relinked as a dynamic ELF file, its
75723 + base address will be randomized as well, completing the full
75724 + randomization of the address space layout. Attacking such programs
75725 + becomes a guessing game. You can find an example of doing this at
75726 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
75727 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
75728 +
75729 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
75730 + feature on a per file basis.
75731 +
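 As a companion illustration (again not part of the patch), the library randomization described above can be observed directly: dladdr() reports the base address mmap() picked for a shared object, and with RANDMMAP it differs between runs. The library name and program are illustrative only.

 /* lib_base.c - print the load address of a shared library (illustrative only)
  * build: gcc -o lib_base lib_base.c -ldl
  */
 #define _GNU_SOURCE
 #include <stdio.h>
 #include <dlfcn.h>

 int main(void)
 {
 	void *handle = dlopen("libm.so.6", RTLD_NOW);
 	Dl_info info;

 	if (!handle || !dladdr(dlsym(handle, "cos"), &info)) {
 		fprintf(stderr, "lookup failed\n");
 		return 1;
 	}
 	/* dli_fbase is the base address chosen by mmap(); with RANDMMAP it
 	 * changes from run to run */
 	printf("%s loaded at %p\n", info.dli_fname, info.dli_fbase);
 	dlclose(handle);
 	return 0;
 }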
75732 +endmenu
75733 +
75734 +menu "Miscellaneous hardening features"
75735 +
75736 +config PAX_MEMORY_SANITIZE
75737 + bool "Sanitize all freed memory"
75738 + help
75739 + By saying Y here the kernel will erase memory pages as soon as they
75740 + are freed. This in turn reduces the lifetime of data stored in the
75741 + pages, making it less likely that sensitive information such as
75742 + passwords, cryptographic secrets, etc stay in memory for too long.
75743 +
75744 + This is especially useful for programs whose runtime is short;
75745 + long-lived processes and the kernel itself also benefit, as long
75746 + as they operate on whole memory pages and free pages that may
75747 + hold sensitive information in a timely manner.
75748 +
75749 + The tradeoff is a performance impact: on a single-CPU system a
75750 + kernel compilation sees a 3% slowdown; other systems and workloads
75751 + may vary, so you are advised to test this feature on your expected
75752 + workload before deploying it.
75753 +
75754 + Note that this feature does not protect data stored in live pages,
75755 + e.g., process memory swapped to disk may stay there for a long time.
75756 +
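 The concept can be sketched in a few lines of kernel-style C; this is only an illustration of "clear a page before it goes back to the allocator" and is not the code this patch actually adds to the page allocator.

 /* Conceptual sketch only -- not the implementation added by this patch. */
 #include <linux/gfp.h>
 #include <linux/highmem.h>

 static inline void sanitize_and_free_page(struct page *page)
 {
 	/* overwrite the page contents so that freed data does not linger */
 	clear_highpage(page);
 	__free_page(page);
 }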
75757 +config PAX_MEMORY_STACKLEAK
75758 + bool "Sanitize kernel stack"
75759 + depends on X86
75760 + help
75761 + By saying Y here the kernel will erase the kernel stack before it
75762 + returns from a system call. This in turn reduces the information
75763 + that a kernel stack leak bug can reveal.
75764 +
75765 + Note that such a bug can still leak information that was put on
75766 + the stack by the current system call (the one eventually triggering
75767 + the bug) but traces of earlier system calls on the kernel stack
75768 + cannot leak anymore.
75769 +
75770 + The tradeoff is a performance impact: on a single-CPU system a
75771 + kernel compilation sees a 1% slowdown; other systems and workloads
75772 + may vary, so you are advised to test this feature on your expected
75773 + workload before deploying it.
75774 +
75775 + Note: full support for this feature requires gcc with plugin support,
75776 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75777 + is not supported). With older gcc versions, functions that have
75778 + large enough stack frames may leave uninitialized memory behind
75779 + that a later stack-leaking bug can still expose.
75780 +
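 The underlying problem is the same one that is easy to demonstrate in userland: data written by an earlier call can survive in the uninitialized stack frame of a later call. The toy program below is illustrative only (at -O0 the two frames usually, but not necessarily, overlap); STACKLEAK applies the equivalent cure to the kernel stack between system calls.

 /* stale_stack.c - leftover stack data from a previous call (illustrative only)
  * build: gcc -O0 -o stale_stack stale_stack.c
  */
 #include <stdio.h>
 #include <string.h>

 static void put_secret(void)
 {
 	char secret[64];

 	strcpy(secret, "supposedly private data");
 	(void)secret;			/* goes out of scope, but stays in RAM */
 }

 static void show_leftover(void)
 {
 	char buf[64];			/* deliberately left uninitialized */

 	buf[sizeof(buf) - 1] = '\0';
 	/* if this frame reuses the previous one, the "secret" may show up here */
 	printf("leftover: %.63s\n", buf);
 }

 int main(void)
 {
 	put_secret();
 	show_leftover();
 	return 0;
 }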
75781 +config PAX_MEMORY_UDEREF
75782 + bool "Prevent invalid userland pointer dereference"
75783 + depends on X86 && !UML_X86 && !XEN
75784 + select PAX_PER_CPU_PGD if X86_64
75785 + help
75786 + By saying Y here the kernel will be prevented from dereferencing
75787 + userland pointers in contexts where the kernel expects only kernel
75788 + pointers. This is both a useful runtime debugging feature and a
75789 + security measure that prevents exploiting a class of kernel bugs.
75790 +
75791 + The tradeoff is that some virtualization solutions may experience
75792 + a huge slowdown and therefore you should not enable this feature
75793 + for kernels meant to run in such environments. Whether a given VM
75794 + solution is affected or not is best determined by simply trying it
75795 + out; the performance impact will be obvious right from boot, as
75796 + this mechanism engages very early on. A good rule of thumb is that
75797 + VMs running on CPUs without hardware virtualization support (i.e.,
75798 + the majority of IA-32 CPUs) will likely experience the slowdown.
75799 +
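 The class of bug this guards against looks roughly like the made-up kernel snippet below: a userland pointer dereferenced directly where only kernel pointers are expected, instead of going through the uaccess helpers.

 /* Illustration of the bug class only -- not code from this patch. */
 #include <linux/uaccess.h>
 #include <linux/errno.h>

 /* BROKEN: dereferences a userland pointer directly; without protection the
  * kernel may silently read attacker-controlled memory here, whereas with
  * PAX_MEMORY_UDEREF the access is caught instead. */
 static int broken_get_flags(int __user *uptr, int *out)
 {
 	*out = *uptr;
 	return 0;
 }

 /* Correct pattern: go through the uaccess helpers. */
 static int fixed_get_flags(int __user *uptr, int *out)
 {
 	if (get_user(*out, uptr))
 		return -EFAULT;
 	return 0;
 }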
75800 +config PAX_REFCOUNT
75801 + bool "Prevent various kernel object reference counter overflows"
75802 + depends on GRKERNSEC && (X86 || SPARC64)
75803 + help
75804 + By saying Y here the kernel will detect and prevent overflowing
75805 + various (but not all) kinds of object reference counters. Such
75806 + overflows can normally occur due to bugs only and are often, if
75807 + not always, exploitable.
75808 +
75809 + The tradeoff is that data structures protected by an overflowed
75810 + refcount will never be freed and therefore will leak memory. Note
75811 + that this leak also happens even without this protection but in
75812 + that case the overflow can eventually trigger the freeing of the
75813 + data structure while it is still being used elsewhere, resulting
75814 + in the exploitable situation that this feature prevents.
75815 +
75816 + Since this has a negligible performance impact, you should enable
75817 + this feature.
75818 +
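 The bug pattern in question looks like the made-up sketch below: an unbalanced get path lets an attacker drive an atomic_t reference counter all the way around to zero, at which point the put path frees an object that is still in use.

 /* Illustration of the bug class only -- not code from this patch. */
 #include <linux/atomic.h>
 #include <linux/slab.h>

 struct obj {
 	atomic_t refcnt;
 	/* ... payload ... */
 };

 static void obj_get(struct obj *o)
 {
 	/* if this can be triggered ~2^32 times without a matching put,
 	 * the counter wraps around */
 	atomic_inc(&o->refcnt);
 }

 static void obj_put(struct obj *o)
 {
 	/* after a wraparound this hits zero while other users still hold
 	 * references, freeing memory that is still in use */
 	if (atomic_dec_and_test(&o->refcnt))
 		kfree(o);
 }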
75819 +config PAX_USERCOPY
75820 + bool "Harden heap object copies between kernel and userland"
75821 + depends on X86 || PPC || SPARC || ARM
75822 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75823 + help
75824 + By saying Y here the kernel will enforce the size of heap objects
75825 + when they are copied in either direction between the kernel and
75826 + userland, even if only a part of the heap object is copied.
75827 +
75828 + Specifically, this checking prevents information leaking from the
75829 + kernel heap during kernel to userland copies (if the kernel heap
75830 + object is otherwise fully initialized) and prevents kernel heap
75831 + overflows during userland to kernel copies.
75832 +
75833 + Note that the current implementation provides the strictest bounds
75834 + checks for the SLUB allocator.
75835 +
75836 + Enabling this option also enables per-slab-cache protection against
75837 + data in a given cache being copied into or out of it via userland
75838 + accessors. Though the whitelist of regions will be reduced over
75839 + time, it notably protects important data structures like task structs.
75840 +
75841 + If frame pointers are enabled on x86, this option will also restrict
75842 + copies into and out of the kernel stack to local variables within a
75843 + single frame.
75844 +
75845 + Since this has a negligible performance impact, you should enable
75846 + this feature.
75847 +
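 The checks roughly correspond to rejecting copies like the broken one in the made-up sketch below, where a user-controlled length lets a copy run past the end of a kmalloc()ed object.

 /* Illustration of the bug class only -- not code from this patch. */
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>

 #define KBUF_SIZE 32

 /* BROKEN: 'len' is user-controlled; anything larger than KBUF_SIZE copies
  * adjacent heap memory (other objects in the same slab) out to userland.
  * PAX_USERCOPY refuses the copy once it extends past the bounds of the
  * heap object; the reverse direction would be a heap overflow. */
 static long broken_read(const char *kbuf /* kmalloc(KBUF_SIZE, ...) */,
 			char __user *ubuf, size_t len)
 {
 	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
 }

 /* Correct pattern: bound the length against the object size explicitly. */
 static long fixed_read(const char *kbuf, char __user *ubuf, size_t len)
 {
 	if (len > KBUF_SIZE)
 		return -EINVAL;
 	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
 }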
75848 +endmenu
75849 +
75850 +endmenu
75851 +
75852 config KEYS
75853 bool "Enable access key retention support"
75854 help
75855 @@ -167,7 +719,7 @@ config INTEL_TXT
75856 config LSM_MMAP_MIN_ADDR
75857 int "Low address space for LSM to protect from user allocation"
75858 depends on SECURITY && SECURITY_SELINUX
75859 - default 32768 if ARM
75860 + default 32768 if ALPHA || ARM || PARISC || SPARC32
75861 default 65536
75862 help
75863 This is the portion of low virtual memory which should be protected
75864 diff -urNp linux-3.0.7/security/keys/compat.c linux-3.0.7/security/keys/compat.c
75865 --- linux-3.0.7/security/keys/compat.c 2011-07-21 22:17:23.000000000 -0400
75866 +++ linux-3.0.7/security/keys/compat.c 2011-10-06 04:17:55.000000000 -0400
75867 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
75868 if (ret == 0)
75869 goto no_payload_free;
75870
75871 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
75872 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
75873
75874 if (iov != iovstack)
75875 kfree(iov);
75876 diff -urNp linux-3.0.7/security/keys/keyctl.c linux-3.0.7/security/keys/keyctl.c
75877 --- linux-3.0.7/security/keys/keyctl.c 2011-07-21 22:17:23.000000000 -0400
75878 +++ linux-3.0.7/security/keys/keyctl.c 2011-10-06 04:17:55.000000000 -0400
75879 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(str
75880 /*
75881 * Copy the iovec data from userspace
75882 */
75883 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
75884 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
75885 unsigned ioc)
75886 {
75887 for (; ioc > 0; ioc--) {
75888 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *b
75889 * If successful, 0 will be returned.
75890 */
75891 long keyctl_instantiate_key_common(key_serial_t id,
75892 - const struct iovec *payload_iov,
75893 + const struct iovec __user *payload_iov,
75894 unsigned ioc,
75895 size_t plen,
75896 key_serial_t ringid)
75897 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t
75898 [0].iov_len = plen
75899 };
75900
75901 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
75902 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
75903 }
75904
75905 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
75906 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_seri
75907 if (ret == 0)
75908 goto no_payload_free;
75909
75910 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
75911 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
75912
75913 if (iov != iovstack)
75914 kfree(iov);
75915 diff -urNp linux-3.0.7/security/keys/keyring.c linux-3.0.7/security/keys/keyring.c
75916 --- linux-3.0.7/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
75917 +++ linux-3.0.7/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
75918 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
75919 ret = -EFAULT;
75920
75921 for (loop = 0; loop < klist->nkeys; loop++) {
75922 + key_serial_t serial;
75923 key = klist->keys[loop];
75924 + serial = key->serial;
75925
75926 tmp = sizeof(key_serial_t);
75927 if (tmp > buflen)
75928 tmp = buflen;
75929
75930 - if (copy_to_user(buffer,
75931 - &key->serial,
75932 - tmp) != 0)
75933 + if (copy_to_user(buffer, &serial, tmp))
75934 goto error;
75935
75936 buflen -= tmp;
75937 diff -urNp linux-3.0.7/security/min_addr.c linux-3.0.7/security/min_addr.c
75938 --- linux-3.0.7/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
75939 +++ linux-3.0.7/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
75940 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75941 */
75942 static void update_mmap_min_addr(void)
75943 {
75944 +#ifndef SPARC
75945 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75946 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75947 mmap_min_addr = dac_mmap_min_addr;
75948 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75949 #else
75950 mmap_min_addr = dac_mmap_min_addr;
75951 #endif
75952 +#endif
75953 }
75954
75955 /*
75956 diff -urNp linux-3.0.7/security/security.c linux-3.0.7/security/security.c
75957 --- linux-3.0.7/security/security.c 2011-07-21 22:17:23.000000000 -0400
75958 +++ linux-3.0.7/security/security.c 2011-08-23 21:48:14.000000000 -0400
75959 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
75960 /* things that live in capability.c */
75961 extern void __init security_fixup_ops(struct security_operations *ops);
75962
75963 -static struct security_operations *security_ops;
75964 -static struct security_operations default_security_ops = {
75965 +static struct security_operations *security_ops __read_only;
75966 +static struct security_operations default_security_ops __read_only = {
75967 .name = "default",
75968 };
75969
75970 @@ -67,7 +67,9 @@ int __init security_init(void)
75971
75972 void reset_security_ops(void)
75973 {
75974 + pax_open_kernel();
75975 security_ops = &default_security_ops;
75976 + pax_close_kernel();
75977 }
75978
75979 /* Save user chosen LSM */
75980 diff -urNp linux-3.0.7/security/selinux/hooks.c linux-3.0.7/security/selinux/hooks.c
75981 --- linux-3.0.7/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
75982 +++ linux-3.0.7/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
75983 @@ -93,7 +93,6 @@
75984 #define NUM_SEL_MNT_OPTS 5
75985
75986 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
75987 -extern struct security_operations *security_ops;
75988
75989 /* SECMARK reference count */
75990 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
75991 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
75992
75993 #endif
75994
75995 -static struct security_operations selinux_ops = {
75996 +static struct security_operations selinux_ops __read_only = {
75997 .name = "selinux",
75998
75999 .ptrace_access_check = selinux_ptrace_access_check,
76000 diff -urNp linux-3.0.7/security/selinux/include/xfrm.h linux-3.0.7/security/selinux/include/xfrm.h
76001 --- linux-3.0.7/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
76002 +++ linux-3.0.7/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
76003 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
76004
76005 static inline void selinux_xfrm_notify_policyload(void)
76006 {
76007 - atomic_inc(&flow_cache_genid);
76008 + atomic_inc_unchecked(&flow_cache_genid);
76009 }
76010 #else
76011 static inline int selinux_xfrm_enabled(void)
76012 diff -urNp linux-3.0.7/security/selinux/ss/services.c linux-3.0.7/security/selinux/ss/services.c
76013 --- linux-3.0.7/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
76014 +++ linux-3.0.7/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
76015 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
76016 int rc = 0;
76017 struct policy_file file = { data, len }, *fp = &file;
76018
76019 + pax_track_stack();
76020 +
76021 if (!ss_initialized) {
76022 avtab_cache_init();
76023 rc = policydb_read(&policydb, fp);
76024 diff -urNp linux-3.0.7/security/smack/smack_lsm.c linux-3.0.7/security/smack/smack_lsm.c
76025 --- linux-3.0.7/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
76026 +++ linux-3.0.7/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
76027 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
76028 return 0;
76029 }
76030
76031 -struct security_operations smack_ops = {
76032 +struct security_operations smack_ops __read_only = {
76033 .name = "smack",
76034
76035 .ptrace_access_check = smack_ptrace_access_check,
76036 diff -urNp linux-3.0.7/security/tomoyo/tomoyo.c linux-3.0.7/security/tomoyo/tomoyo.c
76037 --- linux-3.0.7/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
76038 +++ linux-3.0.7/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
76039 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
76040 * tomoyo_security_ops is a "struct security_operations" which is used for
76041 * registering TOMOYO.
76042 */
76043 -static struct security_operations tomoyo_security_ops = {
76044 +static struct security_operations tomoyo_security_ops __read_only = {
76045 .name = "tomoyo",
76046 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76047 .cred_prepare = tomoyo_cred_prepare,
76048 diff -urNp linux-3.0.7/sound/aoa/codecs/onyx.c linux-3.0.7/sound/aoa/codecs/onyx.c
76049 --- linux-3.0.7/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
76050 +++ linux-3.0.7/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
76051 @@ -54,7 +54,7 @@ struct onyx {
76052 spdif_locked:1,
76053 analog_locked:1,
76054 original_mute:2;
76055 - int open_count;
76056 + local_t open_count;
76057 struct codec_info *codec_info;
76058
76059 /* mutex serializes concurrent access to the device
76060 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
76061 struct onyx *onyx = cii->codec_data;
76062
76063 mutex_lock(&onyx->mutex);
76064 - onyx->open_count++;
76065 + local_inc(&onyx->open_count);
76066 mutex_unlock(&onyx->mutex);
76067
76068 return 0;
76069 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
76070 struct onyx *onyx = cii->codec_data;
76071
76072 mutex_lock(&onyx->mutex);
76073 - onyx->open_count--;
76074 - if (!onyx->open_count)
76075 + if (local_dec_and_test(&onyx->open_count))
76076 onyx->spdif_locked = onyx->analog_locked = 0;
76077 mutex_unlock(&onyx->mutex);
76078
76079 diff -urNp linux-3.0.7/sound/aoa/codecs/onyx.h linux-3.0.7/sound/aoa/codecs/onyx.h
76080 --- linux-3.0.7/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
76081 +++ linux-3.0.7/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
76082 @@ -11,6 +11,7 @@
76083 #include <linux/i2c.h>
76084 #include <asm/pmac_low_i2c.h>
76085 #include <asm/prom.h>
76086 +#include <asm/local.h>
76087
76088 /* PCM3052 register definitions */
76089
76090 diff -urNp linux-3.0.7/sound/core/oss/pcm_oss.c linux-3.0.7/sound/core/oss/pcm_oss.c
76091 --- linux-3.0.7/sound/core/oss/pcm_oss.c 2011-07-21 22:17:23.000000000 -0400
76092 +++ linux-3.0.7/sound/core/oss/pcm_oss.c 2011-10-06 04:17:55.000000000 -0400
76093 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(str
76094 if (in_kernel) {
76095 mm_segment_t fs;
76096 fs = snd_enter_user();
76097 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76098 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76099 snd_leave_user(fs);
76100 } else {
76101 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76102 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76103 }
76104 if (ret != -EPIPE && ret != -ESTRPIPE)
76105 break;
76106 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(stru
76107 if (in_kernel) {
76108 mm_segment_t fs;
76109 fs = snd_enter_user();
76110 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76111 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76112 snd_leave_user(fs);
76113 } else {
76114 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76115 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76116 }
76117 if (ret == -EPIPE) {
76118 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
76119 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct
76120 struct snd_pcm_plugin_channel *channels;
76121 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
76122 if (!in_kernel) {
76123 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
76124 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
76125 return -EFAULT;
76126 buf = runtime->oss.buffer;
76127 }
76128 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct
76129 }
76130 } else {
76131 tmp = snd_pcm_oss_write2(substream,
76132 - (const char __force *)buf,
76133 + (const char __force_kernel *)buf,
76134 runtime->oss.period_bytes, 0);
76135 if (tmp <= 0)
76136 goto err;
76137 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct
76138 struct snd_pcm_runtime *runtime = substream->runtime;
76139 snd_pcm_sframes_t frames, frames1;
76140 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
76141 - char __user *final_dst = (char __force __user *)buf;
76142 + char __user *final_dst = (char __force_user *)buf;
76143 if (runtime->oss.plugin_first) {
76144 struct snd_pcm_plugin_channel *channels;
76145 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
76146 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct
76147 xfer += tmp;
76148 runtime->oss.buffer_used -= tmp;
76149 } else {
76150 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
76151 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
76152 runtime->oss.period_bytes, 0);
76153 if (tmp <= 0)
76154 goto err;
76155 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_p
76156 size1);
76157 size1 /= runtime->channels; /* frames */
76158 fs = snd_enter_user();
76159 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
76160 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
76161 snd_leave_user(fs);
76162 }
76163 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
76164 diff -urNp linux-3.0.7/sound/core/pcm_compat.c linux-3.0.7/sound/core/pcm_compat.c
76165 --- linux-3.0.7/sound/core/pcm_compat.c 2011-09-02 18:11:21.000000000 -0400
76166 +++ linux-3.0.7/sound/core/pcm_compat.c 2011-10-06 04:17:55.000000000 -0400
76167 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(st
76168 int err;
76169
76170 fs = snd_enter_user();
76171 - err = snd_pcm_delay(substream, &delay);
76172 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
76173 snd_leave_user(fs);
76174 if (err < 0)
76175 return err;
76176 diff -urNp linux-3.0.7/sound/core/pcm_native.c linux-3.0.7/sound/core/pcm_native.c
76177 --- linux-3.0.7/sound/core/pcm_native.c 2011-07-21 22:17:23.000000000 -0400
76178 +++ linux-3.0.7/sound/core/pcm_native.c 2011-10-06 04:17:55.000000000 -0400
76179 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_
76180 switch (substream->stream) {
76181 case SNDRV_PCM_STREAM_PLAYBACK:
76182 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
76183 - (void __user *)arg);
76184 + (void __force_user *)arg);
76185 break;
76186 case SNDRV_PCM_STREAM_CAPTURE:
76187 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
76188 - (void __user *)arg);
76189 + (void __force_user *)arg);
76190 break;
76191 default:
76192 result = -EINVAL;
76193 diff -urNp linux-3.0.7/sound/core/seq/seq_device.c linux-3.0.7/sound/core/seq/seq_device.c
76194 --- linux-3.0.7/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
76195 +++ linux-3.0.7/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
76196 @@ -63,7 +63,7 @@ struct ops_list {
76197 int argsize; /* argument size */
76198
76199 /* operators */
76200 - struct snd_seq_dev_ops ops;
76201 + struct snd_seq_dev_ops *ops;
76202
76203 /* registred devices */
76204 struct list_head dev_list; /* list of devices */
76205 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
76206
76207 mutex_lock(&ops->reg_mutex);
76208 /* copy driver operators */
76209 - ops->ops = *entry;
76210 + ops->ops = entry;
76211 ops->driver |= DRIVER_LOADED;
76212 ops->argsize = argsize;
76213
76214 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
76215 dev->name, ops->id, ops->argsize, dev->argsize);
76216 return -EINVAL;
76217 }
76218 - if (ops->ops.init_device(dev) >= 0) {
76219 + if (ops->ops->init_device(dev) >= 0) {
76220 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
76221 ops->num_init_devices++;
76222 } else {
76223 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
76224 dev->name, ops->id, ops->argsize, dev->argsize);
76225 return -EINVAL;
76226 }
76227 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
76228 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
76229 dev->status = SNDRV_SEQ_DEVICE_FREE;
76230 dev->driver_data = NULL;
76231 ops->num_init_devices--;
76232 diff -urNp linux-3.0.7/sound/drivers/mts64.c linux-3.0.7/sound/drivers/mts64.c
76233 --- linux-3.0.7/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
76234 +++ linux-3.0.7/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
76235 @@ -28,6 +28,7 @@
76236 #include <sound/initval.h>
76237 #include <sound/rawmidi.h>
76238 #include <sound/control.h>
76239 +#include <asm/local.h>
76240
76241 #define CARD_NAME "Miditerminal 4140"
76242 #define DRIVER_NAME "MTS64"
76243 @@ -66,7 +67,7 @@ struct mts64 {
76244 struct pardevice *pardev;
76245 int pardev_claimed;
76246
76247 - int open_count;
76248 + local_t open_count;
76249 int current_midi_output_port;
76250 int current_midi_input_port;
76251 u8 mode[MTS64_NUM_INPUT_PORTS];
76252 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
76253 {
76254 struct mts64 *mts = substream->rmidi->private_data;
76255
76256 - if (mts->open_count == 0) {
76257 + if (local_read(&mts->open_count) == 0) {
76258 /* We don't need a spinlock here, because this is just called
76259 if the device has not been opened before.
76260 So there aren't any IRQs from the device */
76261 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
76262
76263 msleep(50);
76264 }
76265 - ++(mts->open_count);
76266 + local_inc(&mts->open_count);
76267
76268 return 0;
76269 }
76270 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
76271 struct mts64 *mts = substream->rmidi->private_data;
76272 unsigned long flags;
76273
76274 - --(mts->open_count);
76275 - if (mts->open_count == 0) {
76276 + if (local_dec_return(&mts->open_count) == 0) {
76277 /* We need the spinlock_irqsave here because we can still
76278 have IRQs at this point */
76279 spin_lock_irqsave(&mts->lock, flags);
76280 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
76281
76282 msleep(500);
76283
76284 - } else if (mts->open_count < 0)
76285 - mts->open_count = 0;
76286 + } else if (local_read(&mts->open_count) < 0)
76287 + local_set(&mts->open_count, 0);
76288
76289 return 0;
76290 }
76291 diff -urNp linux-3.0.7/sound/drivers/opl4/opl4_lib.c linux-3.0.7/sound/drivers/opl4/opl4_lib.c
76292 --- linux-3.0.7/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
76293 +++ linux-3.0.7/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
76294 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
76295 MODULE_DESCRIPTION("OPL4 driver");
76296 MODULE_LICENSE("GPL");
76297
76298 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
76299 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
76300 {
76301 int timeout = 10;
76302 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
76303 diff -urNp linux-3.0.7/sound/drivers/portman2x4.c linux-3.0.7/sound/drivers/portman2x4.c
76304 --- linux-3.0.7/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
76305 +++ linux-3.0.7/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
76306 @@ -47,6 +47,7 @@
76307 #include <sound/initval.h>
76308 #include <sound/rawmidi.h>
76309 #include <sound/control.h>
76310 +#include <asm/local.h>
76311
76312 #define CARD_NAME "Portman 2x4"
76313 #define DRIVER_NAME "portman"
76314 @@ -84,7 +85,7 @@ struct portman {
76315 struct pardevice *pardev;
76316 int pardev_claimed;
76317
76318 - int open_count;
76319 + local_t open_count;
76320 int mode[PORTMAN_NUM_INPUT_PORTS];
76321 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
76322 };
76323 diff -urNp linux-3.0.7/sound/firewire/amdtp.c linux-3.0.7/sound/firewire/amdtp.c
76324 --- linux-3.0.7/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
76325 +++ linux-3.0.7/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
76326 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
76327 ptr = s->pcm_buffer_pointer + data_blocks;
76328 if (ptr >= pcm->runtime->buffer_size)
76329 ptr -= pcm->runtime->buffer_size;
76330 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
76331 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
76332
76333 s->pcm_period_pointer += data_blocks;
76334 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
76335 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
76336 */
76337 void amdtp_out_stream_update(struct amdtp_out_stream *s)
76338 {
76339 - ACCESS_ONCE(s->source_node_id_field) =
76340 + ACCESS_ONCE_RW(s->source_node_id_field) =
76341 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
76342 }
76343 EXPORT_SYMBOL(amdtp_out_stream_update);
76344 diff -urNp linux-3.0.7/sound/firewire/amdtp.h linux-3.0.7/sound/firewire/amdtp.h
76345 --- linux-3.0.7/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
76346 +++ linux-3.0.7/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
76347 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
76348 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
76349 struct snd_pcm_substream *pcm)
76350 {
76351 - ACCESS_ONCE(s->pcm) = pcm;
76352 + ACCESS_ONCE_RW(s->pcm) = pcm;
76353 }
76354
76355 /**
76356 diff -urNp linux-3.0.7/sound/firewire/isight.c linux-3.0.7/sound/firewire/isight.c
76357 --- linux-3.0.7/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
76358 +++ linux-3.0.7/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
76359 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
76360 ptr += count;
76361 if (ptr >= runtime->buffer_size)
76362 ptr -= runtime->buffer_size;
76363 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
76364 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
76365
76366 isight->period_counter += count;
76367 if (isight->period_counter >= runtime->period_size) {
76368 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
76369 if (err < 0)
76370 return err;
76371
76372 - ACCESS_ONCE(isight->pcm_active) = true;
76373 + ACCESS_ONCE_RW(isight->pcm_active) = true;
76374
76375 return 0;
76376 }
76377 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
76378 {
76379 struct isight *isight = substream->private_data;
76380
76381 - ACCESS_ONCE(isight->pcm_active) = false;
76382 + ACCESS_ONCE_RW(isight->pcm_active) = false;
76383
76384 mutex_lock(&isight->mutex);
76385 isight_stop_streaming(isight);
76386 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
76387
76388 switch (cmd) {
76389 case SNDRV_PCM_TRIGGER_START:
76390 - ACCESS_ONCE(isight->pcm_running) = true;
76391 + ACCESS_ONCE_RW(isight->pcm_running) = true;
76392 break;
76393 case SNDRV_PCM_TRIGGER_STOP:
76394 - ACCESS_ONCE(isight->pcm_running) = false;
76395 + ACCESS_ONCE_RW(isight->pcm_running) = false;
76396 break;
76397 default:
76398 return -EINVAL;
76399 diff -urNp linux-3.0.7/sound/isa/cmi8330.c linux-3.0.7/sound/isa/cmi8330.c
76400 --- linux-3.0.7/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
76401 +++ linux-3.0.7/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
76402 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
76403
76404 struct snd_pcm *pcm;
76405 struct snd_cmi8330_stream {
76406 - struct snd_pcm_ops ops;
76407 + snd_pcm_ops_no_const ops;
76408 snd_pcm_open_callback_t open;
76409 void *private_data; /* sb or wss */
76410 } streams[2];
76411 diff -urNp linux-3.0.7/sound/oss/sb_audio.c linux-3.0.7/sound/oss/sb_audio.c
76412 --- linux-3.0.7/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
76413 +++ linux-3.0.7/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
76414 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
76415 buf16 = (signed short *)(localbuf + localoffs);
76416 while (c)
76417 {
76418 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76419 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76420 if (copy_from_user(lbuf8,
76421 userbuf+useroffs + p,
76422 locallen))
76423 diff -urNp linux-3.0.7/sound/oss/swarm_cs4297a.c linux-3.0.7/sound/oss/swarm_cs4297a.c
76424 --- linux-3.0.7/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
76425 +++ linux-3.0.7/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
76426 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
76427 {
76428 struct cs4297a_state *s;
76429 u32 pwr, id;
76430 - mm_segment_t fs;
76431 int rval;
76432 #ifndef CONFIG_BCM_CS4297A_CSWARM
76433 u64 cfg;
76434 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
76435 if (!rval) {
76436 char *sb1250_duart_present;
76437
76438 +#if 0
76439 + mm_segment_t fs;
76440 fs = get_fs();
76441 set_fs(KERNEL_DS);
76442 -#if 0
76443 val = SOUND_MASK_LINE;
76444 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
76445 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
76446 val = initvol[i].vol;
76447 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
76448 }
76449 + set_fs(fs);
76450 // cs4297a_write_ac97(s, 0x18, 0x0808);
76451 #else
76452 // cs4297a_write_ac97(s, 0x5e, 0x180);
76453 cs4297a_write_ac97(s, 0x02, 0x0808);
76454 cs4297a_write_ac97(s, 0x18, 0x0808);
76455 #endif
76456 - set_fs(fs);
76457
76458 list_add(&s->list, &cs4297a_devs);
76459
76460 diff -urNp linux-3.0.7/sound/pci/hda/hda_codec.h linux-3.0.7/sound/pci/hda/hda_codec.h
76461 --- linux-3.0.7/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
76462 +++ linux-3.0.7/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
76463 @@ -615,7 +615,7 @@ struct hda_bus_ops {
76464 /* notify power-up/down from codec to controller */
76465 void (*pm_notify)(struct hda_bus *bus);
76466 #endif
76467 -};
76468 +} __no_const;
76469
76470 /* template to pass to the bus constructor */
76471 struct hda_bus_template {
76472 @@ -713,6 +713,7 @@ struct hda_codec_ops {
76473 #endif
76474 void (*reboot_notify)(struct hda_codec *codec);
76475 };
76476 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
76477
76478 /* record for amp information cache */
76479 struct hda_cache_head {
76480 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
76481 struct snd_pcm_substream *substream);
76482 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
76483 struct snd_pcm_substream *substream);
76484 -};
76485 +} __no_const;
76486
76487 /* PCM information for each substream */
76488 struct hda_pcm_stream {
76489 @@ -801,7 +802,7 @@ struct hda_codec {
76490 const char *modelname; /* model name for preset */
76491
76492 /* set by patch */
76493 - struct hda_codec_ops patch_ops;
76494 + hda_codec_ops_no_const patch_ops;
76495
76496 /* PCM to create, set by patch_ops.build_pcms callback */
76497 unsigned int num_pcms;
76498 diff -urNp linux-3.0.7/sound/pci/ice1712/ice1712.h linux-3.0.7/sound/pci/ice1712/ice1712.h
76499 --- linux-3.0.7/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
76500 +++ linux-3.0.7/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
76501 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
76502 unsigned int mask_flags; /* total mask bits */
76503 struct snd_akm4xxx_ops {
76504 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
76505 - } ops;
76506 + } __no_const ops;
76507 };
76508
76509 struct snd_ice1712_spdif {
76510 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
76511 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76512 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76513 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76514 - } ops;
76515 + } __no_const ops;
76516 };
76517
76518
76519 diff -urNp linux-3.0.7/sound/pci/ymfpci/ymfpci_main.c linux-3.0.7/sound/pci/ymfpci/ymfpci_main.c
76520 --- linux-3.0.7/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
76521 +++ linux-3.0.7/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
76522 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
76523 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
76524 break;
76525 }
76526 - if (atomic_read(&chip->interrupt_sleep_count)) {
76527 - atomic_set(&chip->interrupt_sleep_count, 0);
76528 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76529 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76530 wake_up(&chip->interrupt_sleep);
76531 }
76532 __end:
76533 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
76534 continue;
76535 init_waitqueue_entry(&wait, current);
76536 add_wait_queue(&chip->interrupt_sleep, &wait);
76537 - atomic_inc(&chip->interrupt_sleep_count);
76538 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
76539 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
76540 remove_wait_queue(&chip->interrupt_sleep, &wait);
76541 }
76542 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
76543 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
76544 spin_unlock(&chip->reg_lock);
76545
76546 - if (atomic_read(&chip->interrupt_sleep_count)) {
76547 - atomic_set(&chip->interrupt_sleep_count, 0);
76548 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76549 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76550 wake_up(&chip->interrupt_sleep);
76551 }
76552 }
76553 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
76554 spin_lock_init(&chip->reg_lock);
76555 spin_lock_init(&chip->voice_lock);
76556 init_waitqueue_head(&chip->interrupt_sleep);
76557 - atomic_set(&chip->interrupt_sleep_count, 0);
76558 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76559 chip->card = card;
76560 chip->pci = pci;
76561 chip->irq = -1;
76562 diff -urNp linux-3.0.7/sound/soc/soc-core.c linux-3.0.7/sound/soc/soc-core.c
76563 --- linux-3.0.7/sound/soc/soc-core.c 2011-09-02 18:11:21.000000000 -0400
76564 +++ linux-3.0.7/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
76565 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
76566 }
76567
76568 /* ASoC PCM operations */
76569 -static struct snd_pcm_ops soc_pcm_ops = {
76570 +static snd_pcm_ops_no_const soc_pcm_ops = {
76571 .open = soc_pcm_open,
76572 .close = soc_codec_close,
76573 .hw_params = soc_pcm_hw_params,
76574 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
76575 rtd->pcm = pcm;
76576 pcm->private_data = rtd;
76577 if (platform->driver->ops) {
76578 + /* this whole logic is broken... */
76579 soc_pcm_ops.mmap = platform->driver->ops->mmap;
76580 soc_pcm_ops.pointer = platform->driver->ops->pointer;
76581 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
76582 diff -urNp linux-3.0.7/sound/usb/card.h linux-3.0.7/sound/usb/card.h
76583 --- linux-3.0.7/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
76584 +++ linux-3.0.7/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
76585 @@ -44,6 +44,7 @@ struct snd_urb_ops {
76586 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
76587 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
76588 };
76589 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
76590
76591 struct snd_usb_substream {
76592 struct snd_usb_stream *stream;
76593 @@ -93,7 +94,7 @@ struct snd_usb_substream {
76594 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
76595 spinlock_t lock;
76596
76597 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
76598 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
76599 };
76600
76601 struct snd_usb_stream {
76602 diff -urNp linux-3.0.7/tools/gcc/checker_plugin.c linux-3.0.7/tools/gcc/checker_plugin.c
76603 --- linux-3.0.7/tools/gcc/checker_plugin.c 1969-12-31 19:00:00.000000000 -0500
76604 +++ linux-3.0.7/tools/gcc/checker_plugin.c 2011-10-06 04:17:55.000000000 -0400
76605 @@ -0,0 +1,169 @@
76606 +/*
76607 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
76608 + * Licensed under the GPL v2
76609 + *
76610 + * Note: the choice of the license means that the compilation process is
76611 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
76612 + * but for the kernel it doesn't matter since it doesn't link against
76613 + * any of the gcc libraries
76614 + *
76615 + * gcc plugin to implement various sparse (source code checker) features
76616 + *
76617 + * TODO:
76618 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
76619 + *
76620 + * BUGS:
76621 + * - none known
76622 + */
76623 +#include "gcc-plugin.h"
76624 +#include "config.h"
76625 +#include "system.h"
76626 +#include "coretypes.h"
76627 +#include "tree.h"
76628 +#include "tree-pass.h"
76629 +#include "intl.h"
76630 +#include "plugin-version.h"
76631 +#include "tm.h"
76632 +#include "toplev.h"
76633 +#include "basic-block.h"
76634 +#include "gimple.h"
76635 +//#include "expr.h" where are you...
76636 +#include "diagnostic.h"
76637 +#include "rtl.h"
76638 +#include "emit-rtl.h"
76639 +#include "function.h"
76640 +#include "tree-flow.h"
76641 +#include "target.h"
76642 +
76643 +extern void c_register_addr_space (const char *str, addr_space_t as);
76644 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
76645 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
76646 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
76647 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
76648 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
76649 +
76650 +extern void print_gimple_stmt(FILE *, gimple, int, int);
76651 +extern rtx emit_move_insn(rtx x, rtx y);
76652 +
76653 +int plugin_is_GPL_compatible;
76654 +
76655 +static struct plugin_info checker_plugin_info = {
76656 + .version = "201110031940",
76657 +};
76658 +
76659 +#define ADDR_SPACE_KERNEL 0
76660 +#define ADDR_SPACE_FORCE_KERNEL 1
76661 +#define ADDR_SPACE_USER 2
76662 +#define ADDR_SPACE_FORCE_USER 3
76663 +#define ADDR_SPACE_IOMEM 0
76664 +#define ADDR_SPACE_FORCE_IOMEM 0
76665 +#define ADDR_SPACE_PERCPU 0
76666 +#define ADDR_SPACE_FORCE_PERCPU 0
76667 +#define ADDR_SPACE_RCU 0
76668 +#define ADDR_SPACE_FORCE_RCU 0
76669 +
76670 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
76671 +{
76672 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
76673 +}
76674 +
76675 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
76676 +{
76677 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
76678 +}
76679 +
76680 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
76681 +{
76682 + return default_addr_space_valid_pointer_mode(mode, as);
76683 +}
76684 +
76685 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
76686 +{
76687 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
76688 +}
76689 +
76690 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
76691 +{
76692 + return default_addr_space_legitimize_address(x, oldx, mode, as);
76693 +}
76694 +
76695 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
76696 +{
76697 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
76698 + return true;
76699 +
76700 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
76701 + return true;
76702 +
76703 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
76704 + return true;
76705 +
76706 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
76707 + return true;
76708 +
76709 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
76710 + return true;
76711 +
76712 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
76713 + return true;
76714 +
76715 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
76716 + return true;
76717 +
76718 + return subset == superset;
76719 +}
76720 +
76721 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
76722 +{
76723 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
76724 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
76725 +
76726 + return op;
76727 +}
76728 +
76729 +static void register_checker_address_spaces(void *event_data, void *data)
76730 +{
76731 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
76732 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
76733 + c_register_addr_space("__user", ADDR_SPACE_USER);
76734 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
76735 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
76736 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
76737 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
76738 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
76739 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
76740 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
76741 +
76742 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
76743 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
76744 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
76745 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
76746 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
76747 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
76748 + targetm.addr_space.convert = checker_addr_space_convert;
76749 +}
76750 +
76751 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
76752 +{
76753 + const char * const plugin_name = plugin_info->base_name;
76754 + const int argc = plugin_info->argc;
76755 + const struct plugin_argument * const argv = plugin_info->argv;
76756 + int i;
76757 +
76758 + if (!plugin_default_version_check(version, &gcc_version)) {
76759 + error(G_("incompatible gcc/plugin versions"));
76760 + return 1;
76761 + }
76762 +
76763 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
76764 +
76765 + for (i = 0; i < argc; ++i)
76766 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76767 +
76768 + if (TARGET_64BIT == 0)
76769 + return 0;
76770 +
76771 + register_callback (plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
76772 +
76773 + return 0;
76774 +}
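 For context (illustrative only): with this plugin loaded, the __user/__kernel annotations become real gcc address spaces, so an implicit conversion between them is a hard compile error rather than just a sparse warning; intentional conversions must spell out a __force_user/__force_kernel cast, which is exactly what the many cast changes elsewhere in this patch do. A minimal sketch of what the plugin enforces:

 /* Illustrative only -- shows what the checker plugin enforces. */
 #include <linux/compiler.h>

 static void checker_example(void __user *uptr)
 {
 	void *kptr;

 	/* kptr = uptr;     <-- rejected by the compiler with the plugin
 	 *                      loaded: the pointers live in different
 	 *                      address spaces */

 	kptr = (void __force_kernel *)uptr;	/* explicit, intentional
 						 * conversion, as used
 						 * throughout this patch */
 	(void)kptr;
 }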
76775 diff -urNp linux-3.0.7/tools/gcc/constify_plugin.c linux-3.0.7/tools/gcc/constify_plugin.c
76776 --- linux-3.0.7/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76777 +++ linux-3.0.7/tools/gcc/constify_plugin.c 2011-08-30 18:23:52.000000000 -0400
76778 @@ -0,0 +1,293 @@
76779 +/*
76780 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76781 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
76782 + * Licensed under the GPL v2, or (at your option) v3
76783 + *
76784 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
76785 + *
76786 + * Homepage:
76787 + * http://www.grsecurity.net/~ephox/const_plugin/
76788 + *
76789 + * Usage:
76790 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76791 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76792 + */
76793 +
76794 +#include "gcc-plugin.h"
76795 +#include "config.h"
76796 +#include "system.h"
76797 +#include "coretypes.h"
76798 +#include "tree.h"
76799 +#include "tree-pass.h"
76800 +#include "intl.h"
76801 +#include "plugin-version.h"
76802 +#include "tm.h"
76803 +#include "toplev.h"
76804 +#include "function.h"
76805 +#include "tree-flow.h"
76806 +#include "plugin.h"
76807 +#include "diagnostic.h"
76808 +//#include "c-tree.h"
76809 +
76810 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
76811 +
76812 +int plugin_is_GPL_compatible;
76813 +
76814 +static struct plugin_info const_plugin_info = {
76815 + .version = "20110826",
76816 + .help = "no-constify\tturn off constification\n",
76817 +};
76818 +
76819 +static void constify_type(tree type);
76820 +static bool walk_struct(tree node);
76821 +
76822 +static tree deconstify_type(tree old_type)
76823 +{
76824 + tree new_type, field;
76825 +
76826 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
76827 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
76828 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
76829 + DECL_FIELD_CONTEXT(field) = new_type;
76830 + TYPE_READONLY(new_type) = 0;
76831 + C_TYPE_FIELDS_READONLY(new_type) = 0;
76832 + return new_type;
76833 +}
76834 +
76835 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76836 +{
76837 + tree type;
76838 +
76839 + *no_add_attrs = true;
76840 + if (TREE_CODE(*node) == FUNCTION_DECL) {
76841 + error("%qE attribute does not apply to functions", name);
76842 + return NULL_TREE;
76843 + }
76844 +
76845 + if (TREE_CODE(*node) == VAR_DECL) {
76846 + error("%qE attribute does not apply to variables", name);
76847 + return NULL_TREE;
76848 + }
76849 +
76850 + if (TYPE_P(*node)) {
76851 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
76852 + *no_add_attrs = false;
76853 + else
76854 + error("%qE attribute applies to struct and union types only", name);
76855 + return NULL_TREE;
76856 + }
76857 +
76858 + type = TREE_TYPE(*node);
76859 +
76860 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
76861 + error("%qE attribute applies to struct and union types only", name);
76862 + return NULL_TREE;
76863 + }
76864 +
76865 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
76866 + error("%qE attribute is already applied to the type", name);
76867 + return NULL_TREE;
76868 + }
76869 +
76870 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
76871 + error("%qE attribute used on type that is not constified", name);
76872 + return NULL_TREE;
76873 + }
76874 +
76875 + if (TREE_CODE(*node) == TYPE_DECL) {
76876 + TREE_TYPE(*node) = deconstify_type(type);
76877 + TREE_READONLY(*node) = 0;
76878 + return NULL_TREE;
76879 + }
76880 +
76881 + return NULL_TREE;
76882 +}
76883 +
76884 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76885 +{
76886 + *no_add_attrs = true;
76887 + if (!TYPE_P(*node)) {
76888 + error("%qE attribute applies to types only", name);
76889 + return NULL_TREE;
76890 + }
76891 +
76892 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
76893 + error("%qE attribute applies to struct and union types only", name);
76894 + return NULL_TREE;
76895 + }
76896 +
76897 + *no_add_attrs = false;
76898 + constify_type(*node);
76899 + return NULL_TREE;
76900 +}
76901 +
76902 +static struct attribute_spec no_const_attr = {
76903 + .name = "no_const",
76904 + .min_length = 0,
76905 + .max_length = 0,
76906 + .decl_required = false,
76907 + .type_required = false,
76908 + .function_type_required = false,
76909 + .handler = handle_no_const_attribute
76910 +};
76911 +
76912 +static struct attribute_spec do_const_attr = {
76913 + .name = "do_const",
76914 + .min_length = 0,
76915 + .max_length = 0,
76916 + .decl_required = false,
76917 + .type_required = false,
76918 + .function_type_required = false,
76919 + .handler = handle_do_const_attribute
76920 +};
76921 +
76922 +static void register_attributes(void *event_data, void *data)
76923 +{
76924 + register_attribute(&no_const_attr);
76925 + register_attribute(&do_const_attr);
76926 +}
76927 +
76928 +static void constify_type(tree type)
76929 +{
76930 + TYPE_READONLY(type) = 1;
76931 + C_TYPE_FIELDS_READONLY(type) = 1;
76932 +}
76933 +
76934 +static bool is_fptr(tree field)
76935 +{
76936 + tree ptr = TREE_TYPE(field);
76937 +
76938 + if (TREE_CODE(ptr) != POINTER_TYPE)
76939 + return false;
76940 +
76941 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
76942 +}
76943 +
76944 +static bool walk_struct(tree node)
76945 +{
76946 + tree field;
76947 +
76948 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
76949 + return false;
76950 +
76951 + if (TYPE_FIELDS(node) == NULL_TREE)
76952 + return false;
76953 +
76954 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
76955 + tree type = TREE_TYPE(field);
76956 + enum tree_code code = TREE_CODE(type);
76957 + if (code == RECORD_TYPE || code == UNION_TYPE) {
76958 + if (!(walk_struct(type)))
76959 + return false;
76960 + } else if (!is_fptr(field) && !TREE_READONLY(field))
76961 + return false;
76962 + }
76963 + return true;
76964 +}
76965 +
76966 +static void finish_type(void *event_data, void *data)
76967 +{
76968 + tree type = (tree)event_data;
76969 +
76970 + if (type == NULL_TREE)
76971 + return;
76972 +
76973 + if (TYPE_READONLY(type))
76974 + return;
76975 +
76976 + if (walk_struct(type))
76977 + constify_type(type);
76978 +}
76979 +
76980 +static unsigned int check_local_variables(void);
76981 +
76982 +struct gimple_opt_pass pass_local_variable = {
76983 + {
76984 + .type = GIMPLE_PASS,
76985 + .name = "check_local_variables",
76986 + .gate = NULL,
76987 + .execute = check_local_variables,
76988 + .sub = NULL,
76989 + .next = NULL,
76990 + .static_pass_number = 0,
76991 + .tv_id = TV_NONE,
76992 + .properties_required = 0,
76993 + .properties_provided = 0,
76994 + .properties_destroyed = 0,
76995 + .todo_flags_start = 0,
76996 + .todo_flags_finish = 0
76997 + }
76998 +};
76999 +
77000 +static unsigned int check_local_variables(void)
77001 +{
77002 + tree var;
77003 + referenced_var_iterator rvi;
77004 +
77005 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
77006 + FOR_EACH_REFERENCED_VAR(var, rvi) {
77007 +#else
77008 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77009 +#endif
77010 + tree type = TREE_TYPE(var);
77011 +
77012 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77013 + continue;
77014 +
77015 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
77016 + continue;
77017 +
77018 + if (!TYPE_READONLY(type))
77019 + continue;
77020 +
77021 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
77022 +// continue;
77023 +
77024 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
77025 +// continue;
77026 +
77027 + if (walk_struct(type)) {
77028 + error("constified variable %qE cannot be local", var);
77029 + return 1;
77030 + }
77031 + }
77032 + return 0;
77033 +}
77034 +
77035 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77036 +{
77037 + const char * const plugin_name = plugin_info->base_name;
77038 + const int argc = plugin_info->argc;
77039 + const struct plugin_argument * const argv = plugin_info->argv;
77040 + int i;
77041 + bool constify = true;
77042 +
77043 + struct register_pass_info local_variable_pass_info = {
77044 + .pass = &pass_local_variable.pass,
77045 + .reference_pass_name = "*referenced_vars",
77046 + .ref_pass_instance_number = 0,
77047 + .pos_op = PASS_POS_INSERT_AFTER
77048 + };
77049 +
77050 + if (!plugin_default_version_check(version, &gcc_version)) {
77051 + error(G_("incompatible gcc/plugin versions"));
77052 + return 1;
77053 + }
77054 +
77055 + for (i = 0; i < argc; ++i) {
77056 + if (!(strcmp(argv[i].key, "no-constify"))) {
77057 + constify = false;
77058 + continue;
77059 + }
77060 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77061 + }
77062 +
77063 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77064 + if (constify) {
77065 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77066 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
77067 + }
77068 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77069 +
77070 + return 0;
77071 +}
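
The constify plugin above turns any struct or union whose fields are all function pointers (or already read-only) into a read-only type unless it carries the no_const attribute, and its check_local_variables pass then refuses stack instances of such types. A minimal sketch of how that looks from kernel-style code, assuming the plugin is loaded; every type and function name below is invented for illustration:

	/* All fields are function pointers, so finish_type() marks the type
	 * read-only and its instances effectively become const. */
	struct hypothetical_ops {
		int (*open)(void *priv);
		void (*close)(void *priv);
	};

	/* Same layout, but the attribute registered above opts it out. */
	struct hypothetical_ops_rw {
		int (*open)(void *priv);
		void (*close)(void *priv);
	} __attribute__((no_const));

	/* A plain data member also prevents constification, because
	 * walk_struct() bails out on any field that is neither a function
	 * pointer nor read-only. */
	struct hypothetical_state {
		int (*open)(void *priv);
		void *cookie;
	};

	static struct hypothetical_ops global_ops;	/* fine: static storage */

	void hypothetical_user(void)
	{
		struct hypothetical_ops_rw local_rw;	/* fine: type stays writable */

		/* struct hypothetical_ops local_ro; would be rejected here:
		 * "constified variable ... cannot be local" */
		(void)local_rw;
		(void)global_ops;
	}
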
77072 diff -urNp linux-3.0.7/tools/gcc/kallocstat_plugin.c linux-3.0.7/tools/gcc/kallocstat_plugin.c
77073 --- linux-3.0.7/tools/gcc/kallocstat_plugin.c 1969-12-31 19:00:00.000000000 -0500
77074 +++ linux-3.0.7/tools/gcc/kallocstat_plugin.c 2011-10-06 04:17:55.000000000 -0400
77075 @@ -0,0 +1,165 @@
77076 +/*
77077 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77078 + * Licensed under the GPL v2
77079 + *
77080 + * Note: the choice of the license means that the compilation process is
77081 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77082 + * but for the kernel it doesn't matter since it doesn't link against
77083 + * any of the gcc libraries
77084 + *
77085 + * gcc plugin to find the distribution of k*alloc sizes
77086 + *
77087 + * TODO:
77088 + *
77089 + * BUGS:
77090 + * - none known
77091 + */
77092 +#include "gcc-plugin.h"
77093 +#include "config.h"
77094 +#include "system.h"
77095 +#include "coretypes.h"
77096 +#include "tree.h"
77097 +#include "tree-pass.h"
77098 +#include "intl.h"
77099 +#include "plugin-version.h"
77100 +#include "tm.h"
77101 +#include "toplev.h"
77102 +#include "basic-block.h"
77103 +#include "gimple.h"
77104 +//#include "expr.h" where are you...
77105 +#include "diagnostic.h"
77106 +#include "rtl.h"
77107 +#include "emit-rtl.h"
77108 +#include "function.h"
77109 +
77110 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77111 +
77112 +int plugin_is_GPL_compatible;
77113 +
77114 +static const char * const kalloc_functions[] = {
77115 + "__kmalloc",
77116 + "kmalloc",
77117 + "kmalloc_large",
77118 + "kmalloc_node",
77119 + "kmalloc_order",
77120 + "kmalloc_order_trace",
77121 + "kmalloc_slab",
77122 + "kzalloc",
77123 + "kzalloc_node",
77124 +};
77125 +
77126 +static struct plugin_info kallocstat_plugin_info = {
77127 + .version = "201109121100",
77128 +};
77129 +
77130 +static unsigned int execute_kallocstat(void);
77131 +
77132 +static struct gimple_opt_pass kallocstat_pass = {
77133 + .pass = {
77134 + .type = GIMPLE_PASS,
77135 + .name = "kallocstat",
77136 + .gate = NULL,
77137 + .execute = execute_kallocstat,
77138 + .sub = NULL,
77139 + .next = NULL,
77140 + .static_pass_number = 0,
77141 + .tv_id = TV_NONE,
77142 + .properties_required = 0,
77143 + .properties_provided = 0,
77144 + .properties_destroyed = 0,
77145 + .todo_flags_start = 0,
77146 + .todo_flags_finish = 0
77147 + }
77148 +};
77149 +
77150 +static bool is_kalloc(const char *fnname)
77151 +{
77152 + size_t i;
77153 +
77154 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
77155 + if (!strcmp(fnname, kalloc_functions[i]))
77156 + return true;
77157 + return false;
77158 +}
77159 +
77160 +static unsigned int execute_kallocstat(void)
77161 +{
77162 + basic_block bb;
77163 +
77164 + // 1. loop through BBs and GIMPLE statements
77165 + FOR_EACH_BB(bb) {
77166 + gimple_stmt_iterator gsi;
77167 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77168 + // gimple match:
77169 + tree fndecl, size;
77170 + gimple call_stmt;
77171 + const char *fnname;
77172 +
77173 + // is it a call
77174 + call_stmt = gsi_stmt(gsi);
77175 + if (!is_gimple_call(call_stmt))
77176 + continue;
77177 + fndecl = gimple_call_fndecl(call_stmt);
77178 + if (fndecl == NULL_TREE)
77179 + continue;
77180 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
77181 + continue;
77182 +
77183 + // is it a call to k*alloc
77184 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
77185 + if (!is_kalloc(fnname))
77186 + continue;
77187 +
77188 + // is the size arg the result of a simple const assignment
77189 + size = gimple_call_arg(call_stmt, 0);
77190 + while (true) {
77191 + gimple def_stmt;
77192 + expanded_location xloc;
77193 + size_t size_val;
77194 +
77195 + if (TREE_CODE(size) != SSA_NAME)
77196 + break;
77197 + def_stmt = SSA_NAME_DEF_STMT(size);
77198 + if (!def_stmt || !is_gimple_assign(def_stmt))
77199 + break;
77200 + if (gimple_num_ops(def_stmt) != 2)
77201 + break;
77202 + size = gimple_assign_rhs1(def_stmt);
77203 + if (!TREE_CONSTANT(size))
77204 + continue;
77205 + xloc = expand_location(gimple_location(def_stmt));
77206 + if (!xloc.file)
77207 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
77208 + size_val = TREE_INT_CST_LOW(size);
77209 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
77210 + break;
77211 + }
77212 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
77213 +//debug_tree(gimple_call_fn(call_stmt));
77214 +//print_node(stderr, "pax", fndecl, 4);
77215 + }
77216 + }
77217 +
77218 + return 0;
77219 +}
77220 +
77221 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77222 +{
77223 + const char * const plugin_name = plugin_info->base_name;
77224 + struct register_pass_info kallocstat_pass_info = {
77225 + .pass = &kallocstat_pass.pass,
77226 + .reference_pass_name = "ssa",
77227 + .ref_pass_instance_number = 0,
77228 + .pos_op = PASS_POS_INSERT_AFTER
77229 + };
77230 +
77231 + if (!plugin_default_version_check(version, &gcc_version)) {
77232 + error(G_("incompatible gcc/plugin versions"));
77233 + return 1;
77234 + }
77235 +
77236 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
77237 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
77238 +
77239 + return 0;
77240 +}
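
The kallocstat pass above only reports a call when its size argument is an SSA name that a chain of single-operand assignments resolves to a constant, so what gets flagged depends on the GIMPLE left behind by the passes that run before "ssa". A hedged sketch of the kind of call site it inspects, with invented names; the report format comes straight from the fprintf() above:

	/* kmalloc is declared by hand only to keep the sketch self-contained;
	 * the plugin matches callees purely by name, as in kalloc_functions[]. */
	extern void *kmalloc(unsigned long size, unsigned int flags);

	struct hypothetical_req {
		unsigned long id;
		char payload[120];
	};

	void *hypothetical_alloc(void)
	{
		unsigned long n = sizeof(struct hypothetical_req);

		/* If n is still an SSA name defined by a plain constant
		 * assignment when the pass runs, it prints something of the
		 * form (sizes here assume a 64-bit layout):
		 *   kallocsize:      128       80 kmalloc <file>:<line>
		 * A size written directly as kmalloc(sizeof(...), 0) is a
		 * constant operand, not an SSA name, and is skipped. */
		return kmalloc(n, 0);
	}
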
77241 diff -urNp linux-3.0.7/tools/gcc/kernexec_plugin.c linux-3.0.7/tools/gcc/kernexec_plugin.c
77242 --- linux-3.0.7/tools/gcc/kernexec_plugin.c 1969-12-31 19:00:00.000000000 -0500
77243 +++ linux-3.0.7/tools/gcc/kernexec_plugin.c 2011-10-06 04:17:55.000000000 -0400
77244 @@ -0,0 +1,273 @@
77245 +/*
77246 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77247 + * Licensed under the GPL v2
77248 + *
77249 + * Note: the choice of the license means that the compilation process is
77250 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77251 + * but for the kernel it doesn't matter since it doesn't link against
77252 + * any of the gcc libraries
77253 + *
77254 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
77255 + *
77256 + * TODO:
77257 + *
77258 + * BUGS:
77259 + * - none known
77260 + */
77261 +#include "gcc-plugin.h"
77262 +#include "config.h"
77263 +#include "system.h"
77264 +#include "coretypes.h"
77265 +#include "tree.h"
77266 +#include "tree-pass.h"
77267 +#include "intl.h"
77268 +#include "plugin-version.h"
77269 +#include "tm.h"
77270 +#include "toplev.h"
77271 +#include "basic-block.h"
77272 +#include "gimple.h"
77273 +//#include "expr.h" where are you...
77274 +#include "diagnostic.h"
77275 +#include "rtl.h"
77276 +#include "emit-rtl.h"
77277 +#include "function.h"
77278 +#include "tree-flow.h"
77279 +
77280 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77281 +extern rtx emit_move_insn(rtx x, rtx y);
77282 +
77283 +int plugin_is_GPL_compatible;
77284 +
77285 +static struct plugin_info kernexec_plugin_info = {
77286 + .version = "201110032145",
77287 +};
77288 +
77289 +static unsigned int execute_kernexec_fptr(void);
77290 +static unsigned int execute_kernexec_retaddr(void);
77291 +static bool kernexec_cmodel_check(void);
77292 +
77293 +static struct gimple_opt_pass kernexec_fptr_pass = {
77294 + .pass = {
77295 + .type = GIMPLE_PASS,
77296 + .name = "kernexec_fptr",
77297 + .gate = kernexec_cmodel_check,
77298 + .execute = execute_kernexec_fptr,
77299 + .sub = NULL,
77300 + .next = NULL,
77301 + .static_pass_number = 0,
77302 + .tv_id = TV_NONE,
77303 + .properties_required = 0,
77304 + .properties_provided = 0,
77305 + .properties_destroyed = 0,
77306 + .todo_flags_start = 0,
77307 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
77308 + }
77309 +};
77310 +
77311 +static struct rtl_opt_pass kernexec_retaddr_pass = {
77312 + .pass = {
77313 + .type = RTL_PASS,
77314 + .name = "kernexec_retaddr",
77315 + .gate = kernexec_cmodel_check,
77316 + .execute = execute_kernexec_retaddr,
77317 + .sub = NULL,
77318 + .next = NULL,
77319 + .static_pass_number = 0,
77320 + .tv_id = TV_NONE,
77321 + .properties_required = 0,
77322 + .properties_provided = 0,
77323 + .properties_destroyed = 0,
77324 + .todo_flags_start = 0,
77325 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
77326 + }
77327 +};
77328 +
77329 +static bool kernexec_cmodel_check(void)
77330 +{
77331 + tree section;
77332 +
77333 + if (ix86_cmodel != CM_KERNEL)
77334 + return false;
77335 +
77336 + section = lookup_attribute("__section__", DECL_ATTRIBUTES(current_function_decl));
77337 + if (!section || !TREE_VALUE(section))
77338 + return true;
77339 +
77340 + section = TREE_VALUE(TREE_VALUE(section));
77341 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
77342 + return true;
77343 +
77344 + return false;
77345 +}
77346 +
77347 +/*
77348 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
77349 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
77350 + */
77351 +static void kernexec_instrument_fptr(gimple_stmt_iterator gsi)
77352 +{
77353 + gimple assign_intptr, assign_new_fptr, call_stmt;
77354 + tree intptr, old_fptr, new_fptr, kernexec_mask;
77355 +
77356 + call_stmt = gsi_stmt(gsi);
77357 + old_fptr = gimple_call_fn(call_stmt);
77358 +
77359 + // create temporary unsigned long variable used for bitops and cast fptr to it
77360 + intptr = create_tmp_var(long_unsigned_type_node, NULL);
77361 + add_referenced_var(intptr);
77362 + mark_sym_for_renaming(intptr);
77363 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
77364 + update_stmt(assign_intptr);
77365 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
77366 +
77367 + // apply logical or to temporary unsigned long and bitmask
77368 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
77369 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
77370 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
77371 + update_stmt(assign_intptr);
77372 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
77373 +
77374 + // cast temporary unsigned long back to a temporary fptr variable
77375 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), NULL);
77376 + add_referenced_var(new_fptr);
77377 + mark_sym_for_renaming(new_fptr);
77378 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
77379 + update_stmt(assign_new_fptr);
77380 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
77381 +
77382 + // replace call stmt fn with the new fptr
77383 + gimple_call_set_fn(call_stmt, new_fptr);
77384 + update_stmt(call_stmt);
77385 +}
77386 +
77387 +/*
77388 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
77389 + */
77390 +static unsigned int execute_kernexec_fptr(void)
77391 +{
77392 + basic_block bb;
77393 + gimple_stmt_iterator gsi;
77394 +
77395 + // 1. loop through BBs and GIMPLE statements
77396 + FOR_EACH_BB(bb) {
77397 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77398 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
77399 + tree fn;
77400 + gimple call_stmt;
77401 +
77402 + // is it a call ...
77403 + call_stmt = gsi_stmt(gsi);
77404 + if (!is_gimple_call(call_stmt))
77405 + continue;
77406 + fn = gimple_call_fn(call_stmt);
77407 + if (TREE_CODE(fn) == ADDR_EXPR)
77408 + continue;
77409 + if (TREE_CODE(fn) != SSA_NAME)
77410 + gcc_unreachable();
77411 +
77412 + // ... through a function pointer
77413 + fn = SSA_NAME_VAR(fn);
77414 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
77415 + continue;
77416 + fn = TREE_TYPE(fn);
77417 + if (TREE_CODE(fn) != POINTER_TYPE)
77418 + continue;
77419 + fn = TREE_TYPE(fn);
77420 + if (TREE_CODE(fn) != FUNCTION_TYPE)
77421 + continue;
77422 +
77423 + kernexec_instrument_fptr(gsi);
77424 +
77425 +//debug_tree(gimple_call_fn(call_stmt));
77426 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
77427 + }
77428 + }
77429 +
77430 + return 0;
77431 +}
77432 +
77433 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
77434 +static void kernexec_instrument_retaddr(rtx insn)
77435 +{
77436 + rtx btsq;
77437 + rtvec argvec, constraintvec, labelvec;
77438 + int line;
77439 +
77440 + // create asm volatile("btsq $63,(%%rsp)":::)
77441 + argvec = rtvec_alloc(0);
77442 + constraintvec = rtvec_alloc(0);
77443 + labelvec = rtvec_alloc(0);
77444 + line = expand_location(RTL_LOCATION(insn)).line;
77445 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
77446 + MEM_VOLATILE_P(btsq) = 1;
77447 + RTX_FRAME_RELATED_P(btsq) = 1;
77448 + emit_insn_before(btsq, insn);
77449 +}
77450 +
77451 +/*
77452 + * find all asm level function returns and forcibly set the highest bit of the return address
77453 + */
77454 +static unsigned int execute_kernexec_retaddr(void)
77455 +{
77456 + rtx insn;
77457 +
77458 + // 1. find function returns
77459 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
77460 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
77461 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
77462 + rtx body;
77463 +
77464 + // is it a retn
77465 + if (!JUMP_P(insn))
77466 + continue;
77467 + body = PATTERN(insn);
77468 + if (GET_CODE(body) == PARALLEL)
77469 + body = XVECEXP(body, 0, 0);
77470 + if (GET_CODE(body) != RETURN)
77471 + continue;
77472 + kernexec_instrument_retaddr(insn);
77473 + }
77474 +
77475 +// print_simple_rtl(stderr, get_insns());
77476 +// print_rtl(stderr, get_insns());
77477 +
77478 + return 0;
77479 +}
77480 +
77481 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77482 +{
77483 + const char * const plugin_name = plugin_info->base_name;
77484 + const int argc = plugin_info->argc;
77485 + const struct plugin_argument * const argv = plugin_info->argv;
77486 + int i;
77487 + struct register_pass_info kernexec_fptr_pass_info = {
77488 + .pass = &kernexec_fptr_pass.pass,
77489 + .reference_pass_name = "ssa",
77490 + .ref_pass_instance_number = 0,
77491 + .pos_op = PASS_POS_INSERT_AFTER
77492 + };
77493 + struct register_pass_info kernexec_retaddr_pass_info = {
77494 + .pass = &kernexec_retaddr_pass.pass,
77495 + .reference_pass_name = "pro_and_epilogue",
77496 + .ref_pass_instance_number = 0,
77497 + .pos_op = PASS_POS_INSERT_AFTER
77498 + };
77499 +
77500 + if (!plugin_default_version_check(version, &gcc_version)) {
77501 + error(G_("incompatible gcc/plugin versions"));
77502 + return 1;
77503 + }
77504 +
77505 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
77506 +
77507 + for (i = 0; i < argc; ++i)
77508 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77509 +
77510 + if (TARGET_64BIT == 0)
77511 + return 0;
77512 +
77513 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
77514 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
77515 +
77516 + return 0;
77517 +}
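
The two kernexec passes registered above attack the same problem at different levels: the GIMPLE pass rewrites every indirect call so the function pointer has bit 63 forced on before it is used, and the RTL pass emits btsq $63,(%rsp) in front of every return so the return address gets the same treatment. A hedged, source-level sketch of what the function-pointer rewrite amounts to on the 64-bit targets the plugin restricts itself to; the names are invented and the real transformation works on GIMPLE temporaries, not on C source:

	typedef int (*hypothetical_fptr_t)(int arg);

	int hypothetical_indirect_call(hypothetical_fptr_t handler, int arg)
	{
		/* Original form: return handler(arg);
		 * Instrumented form: OR in the top bit first. A legitimate
		 * kernel pointer already has bit 63 set, so this is a no-op;
		 * a smuggled userland pointer becomes non-canonical and the
		 * dereference faults with a GPF, as the comment above says. */
		unsigned long intptr = (unsigned long)handler;

		intptr |= 0x8000000000000000UL;
		handler = (hypothetical_fptr_t)intptr;
		return handler(arg);
	}
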
77518 diff -urNp linux-3.0.7/tools/gcc/Makefile linux-3.0.7/tools/gcc/Makefile
77519 --- linux-3.0.7/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
77520 +++ linux-3.0.7/tools/gcc/Makefile 2011-10-06 04:17:55.000000000 -0400
77521 @@ -0,0 +1,21 @@
77522 +#CC := gcc
77523 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77524 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77525 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
77526 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77527 +
77528 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77529 +
77530 +hostlibs-y := constify_plugin.so
77531 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77532 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77533 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77534 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77535 +
77536 +always := $(hostlibs-y)
77537 +
77538 +stackleak_plugin-objs := stackleak_plugin.o
77539 +constify_plugin-objs := constify_plugin.o
77540 +kallocstat_plugin-objs := kallocstat_plugin.o
77541 +kernexec_plugin-objs := kernexec_plugin.o
77542 +checker_plugin-objs := checker_plugin.o
77543 diff -urNp linux-3.0.7/tools/gcc/stackleak_plugin.c linux-3.0.7/tools/gcc/stackleak_plugin.c
77544 --- linux-3.0.7/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
77545 +++ linux-3.0.7/tools/gcc/stackleak_plugin.c 2011-09-17 00:53:44.000000000 -0400
77546 @@ -0,0 +1,251 @@
77547 +/*
77548 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77549 + * Licensed under the GPL v2
77550 + *
77551 + * Note: the choice of the license means that the compilation process is
77552 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77553 + * but for the kernel it doesn't matter since it doesn't link against
77554 + * any of the gcc libraries
77555 + *
77556 + * gcc plugin to help implement various PaX features
77557 + *
77558 + * - track lowest stack pointer
77559 + *
77560 + * TODO:
77561 + * - initialize all local variables
77562 + *
77563 + * BUGS:
77564 + * - none known
77565 + */
77566 +#include "gcc-plugin.h"
77567 +#include "config.h"
77568 +#include "system.h"
77569 +#include "coretypes.h"
77570 +#include "tree.h"
77571 +#include "tree-pass.h"
77572 +#include "intl.h"
77573 +#include "plugin-version.h"
77574 +#include "tm.h"
77575 +#include "toplev.h"
77576 +#include "basic-block.h"
77577 +#include "gimple.h"
77578 +//#include "expr.h" where are you...
77579 +#include "diagnostic.h"
77580 +#include "rtl.h"
77581 +#include "emit-rtl.h"
77582 +#include "function.h"
77583 +
77584 +int plugin_is_GPL_compatible;
77585 +
77586 +static int track_frame_size = -1;
77587 +static const char track_function[] = "pax_track_stack";
77588 +static bool init_locals;
77589 +
77590 +static struct plugin_info stackleak_plugin_info = {
77591 + .version = "201109112100",
77592 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
77593 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
77594 +};
77595 +
77596 +static bool gate_stackleak_track_stack(void);
77597 +static unsigned int execute_stackleak_tree_instrument(void);
77598 +static unsigned int execute_stackleak_final(void);
77599 +
77600 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
77601 + .pass = {
77602 + .type = GIMPLE_PASS,
77603 + .name = "stackleak_tree_instrument",
77604 + .gate = gate_stackleak_track_stack,
77605 + .execute = execute_stackleak_tree_instrument,
77606 + .sub = NULL,
77607 + .next = NULL,
77608 + .static_pass_number = 0,
77609 + .tv_id = TV_NONE,
77610 + .properties_required = PROP_gimple_leh | PROP_cfg,
77611 + .properties_provided = 0,
77612 + .properties_destroyed = 0,
77613 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
77614 + .todo_flags_finish = TODO_verify_stmts | TODO_dump_func
77615 + }
77616 +};
77617 +
77618 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
77619 + .pass = {
77620 + .type = RTL_PASS,
77621 + .name = "stackleak_final",
77622 + .gate = gate_stackleak_track_stack,
77623 + .execute = execute_stackleak_final,
77624 + .sub = NULL,
77625 + .next = NULL,
77626 + .static_pass_number = 0,
77627 + .tv_id = TV_NONE,
77628 + .properties_required = 0,
77629 + .properties_provided = 0,
77630 + .properties_destroyed = 0,
77631 + .todo_flags_start = 0,
77632 + .todo_flags_finish = TODO_dump_func
77633 + }
77634 +};
77635 +
77636 +static bool gate_stackleak_track_stack(void)
77637 +{
77638 + return track_frame_size >= 0;
77639 +}
77640 +
77641 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
77642 +{
77643 + gimple call;
77644 + tree fndecl, type;
77645 +
77646 + // insert call to void pax_track_stack(void)
77647 + type = build_function_type_list(void_type_node, NULL_TREE);
77648 + fndecl = build_fn_decl(track_function, type);
77649 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
77650 + call = gimple_build_call(fndecl, 0);
77651 + if (before)
77652 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
77653 + else
77654 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
77655 +}
77656 +
77657 +static unsigned int execute_stackleak_tree_instrument(void)
77658 +{
77659 + basic_block bb, entry_bb;
77660 + gimple_stmt_iterator gsi;
77661 + bool prologue_instrumented = false;
77662 +
77663 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
77664 +
77665 + // 1. loop through BBs and GIMPLE statements
77666 + FOR_EACH_BB(bb) {
77667 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77668 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
77669 + tree fndecl;
77670 + gimple stmt = gsi_stmt(gsi);
77671 +
77672 + if (!is_gimple_call(stmt))
77673 + continue;
77674 + fndecl = gimple_call_fndecl(stmt);
77675 + if (!fndecl)
77676 + continue;
77677 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
77678 + continue;
77679 + if (!DECL_BUILT_IN(fndecl))
77680 + continue;
77681 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
77682 + continue;
77683 + if (DECL_FUNCTION_CODE(fndecl) != BUILT_IN_ALLOCA)
77684 + continue;
77685 +
77686 + // 2. insert track call after each __builtin_alloca call
77687 + stackleak_add_instrumentation(&gsi, false);
77688 + if (bb == entry_bb)
77689 + prologue_instrumented = true;
77690 +// print_node(stderr, "pax", fndecl, 4);
77691 + }
77692 + }
77693 +
77694 + // 3. insert track call at the beginning
77695 + if (!prologue_instrumented) {
77696 + gsi = gsi_start_bb(entry_bb);
77697 + stackleak_add_instrumentation(&gsi, true);
77698 + }
77699 +
77700 + return 0;
77701 +}
77702 +
77703 +static unsigned int execute_stackleak_final(void)
77704 +{
77705 + rtx insn;
77706 +
77707 + if (cfun->calls_alloca)
77708 + return 0;
77709 +
77710 + // keep calls only if function frame is big enough
77711 + if (get_frame_size() >= track_frame_size)
77712 + return 0;
77713 +
77714 + // 1. find pax_track_stack calls
77715 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
77716 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
77717 + rtx body;
77718 +
77719 + if (!CALL_P(insn))
77720 + continue;
77721 + body = PATTERN(insn);
77722 + if (GET_CODE(body) != CALL)
77723 + continue;
77724 + body = XEXP(body, 0);
77725 + if (GET_CODE(body) != MEM)
77726 + continue;
77727 + body = XEXP(body, 0);
77728 + if (GET_CODE(body) != SYMBOL_REF)
77729 + continue;
77730 + if (strcmp(XSTR(body, 0), track_function))
77731 + continue;
77732 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77733 + // 2. delete call
77734 + delete_insn_and_edges(insn);
77735 + }
77736 +
77737 +// print_simple_rtl(stderr, get_insns());
77738 +// print_rtl(stderr, get_insns());
77739 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77740 +
77741 + return 0;
77742 +}
77743 +
77744 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77745 +{
77746 + const char * const plugin_name = plugin_info->base_name;
77747 + const int argc = plugin_info->argc;
77748 + const struct plugin_argument * const argv = plugin_info->argv;
77749 + int i;
77750 + struct register_pass_info stackleak_tree_instrument_pass_info = {
77751 + .pass = &stackleak_tree_instrument_pass.pass,
77752 +// .reference_pass_name = "tree_profile",
77753 + .reference_pass_name = "optimized",
77754 + .ref_pass_instance_number = 0,
77755 + .pos_op = PASS_POS_INSERT_AFTER
77756 + };
77757 + struct register_pass_info stackleak_final_pass_info = {
77758 + .pass = &stackleak_final_rtl_opt_pass.pass,
77759 + .reference_pass_name = "final",
77760 + .ref_pass_instance_number = 0,
77761 + .pos_op = PASS_POS_INSERT_BEFORE
77762 + };
77763 +
77764 + if (!plugin_default_version_check(version, &gcc_version)) {
77765 + error(G_("incompatible gcc/plugin versions"));
77766 + return 1;
77767 + }
77768 +
77769 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
77770 +
77771 + for (i = 0; i < argc; ++i) {
77772 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
77773 + if (!argv[i].value) {
77774 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77775 + continue;
77776 + }
77777 + track_frame_size = atoi(argv[i].value);
77778 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
77779 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77780 + continue;
77781 + }
77782 + if (!strcmp(argv[i].key, "initialize-locals")) {
77783 + if (argv[i].value) {
77784 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77785 + continue;
77786 + }
77787 + init_locals = true;
77788 + continue;
77789 + }
77790 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77791 + }
77792 +
77793 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
77794 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
77795 +
77796 + return 0;
77797 +}
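
The stackleak instrumentation above is two-stage: the GIMPLE pass inserts a call to pax_track_stack() at the start of every function and after every __builtin_alloca(), and the RTL pass later deletes those calls again from functions that neither use alloca nor have a frame of at least track-lowest-sp bytes. The plugin would be loaded with something along the lines of -fplugin=stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=<bytes> (the argument name follows the option parsing above). A source-level sketch with invented names of what survives in a large-frame function:

	extern void pax_track_stack(void);	/* tracking helper, defined elsewhere in the kernel */

	int hypothetical_parse(const char *src)
	{
		char buf[512];		/* frame above the threshold, so the call is kept */

		pax_track_stack();	/* inserted by stackleak_tree_instrument; in a
					 * small, alloca-free function stackleak_final
					 * would delete this call again */

		buf[0] = src ? *src : '\0';
		return buf[0];
	}
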
77798 diff -urNp linux-3.0.7/usr/gen_init_cpio.c linux-3.0.7/usr/gen_init_cpio.c
77799 --- linux-3.0.7/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
77800 +++ linux-3.0.7/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
77801 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
77802 int retval;
77803 int rc = -1;
77804 int namesize;
77805 - int i;
77806 + unsigned int i;
77807
77808 mode |= S_IFREG;
77809
77810 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
77811 *env_var = *expanded = '\0';
77812 strncat(env_var, start + 2, end - start - 2);
77813 strncat(expanded, new_location, start - new_location);
77814 - strncat(expanded, getenv(env_var), PATH_MAX);
77815 - strncat(expanded, end + 1, PATH_MAX);
77816 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
77817 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
77818 strncpy(new_location, expanded, PATH_MAX);
77819 + new_location[PATH_MAX] = 0;
77820 } else
77821 break;
77822 }
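
The gen_init_cpio change above is the standard bounded-append fix: strncat()'s size argument limits how many bytes may be appended, not the total length of the destination, so it has to be the space remaining in the buffer; the added new_location[PATH_MAX] = 0 also guarantees termination after strncpy(), which does not NUL-terminate on truncation. A minimal, stand-alone sketch of the pattern with an invented buffer size:

	#include <string.h>

	#define HYPOTHETICAL_MAX 4096	/* stands in for PATH_MAX */

	/* Append src to dst without writing past dst[HYPOTHETICAL_MAX]:
	 * strncat() copies at most the given number of bytes and then always
	 * appends a terminating NUL, so the destination needs one extra byte. */
	static void hypothetical_append(char dst[HYPOTHETICAL_MAX + 1], const char *src)
	{
		strncat(dst, src, HYPOTHETICAL_MAX - strlen(dst));
	}
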
77823 diff -urNp linux-3.0.7/virt/kvm/kvm_main.c linux-3.0.7/virt/kvm/kvm_main.c
77824 --- linux-3.0.7/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
77825 +++ linux-3.0.7/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
77826 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
77827
77828 static cpumask_var_t cpus_hardware_enabled;
77829 static int kvm_usage_count = 0;
77830 -static atomic_t hardware_enable_failed;
77831 +static atomic_unchecked_t hardware_enable_failed;
77832
77833 struct kmem_cache *kvm_vcpu_cache;
77834 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
77835 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
77836
77837 if (r) {
77838 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
77839 - atomic_inc(&hardware_enable_failed);
77840 + atomic_inc_unchecked(&hardware_enable_failed);
77841 printk(KERN_INFO "kvm: enabling virtualization on "
77842 "CPU%d failed\n", cpu);
77843 }
77844 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
77845
77846 kvm_usage_count++;
77847 if (kvm_usage_count == 1) {
77848 - atomic_set(&hardware_enable_failed, 0);
77849 + atomic_set_unchecked(&hardware_enable_failed, 0);
77850 on_each_cpu(hardware_enable_nolock, NULL, 1);
77851
77852 - if (atomic_read(&hardware_enable_failed)) {
77853 + if (atomic_read_unchecked(&hardware_enable_failed)) {
77854 hardware_disable_all_nolock();
77855 r = -EBUSY;
77856 }
77857 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
77858 kvm_arch_vcpu_put(vcpu);
77859 }
77860
77861 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
77862 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
77863 struct module *module)
77864 {
77865 int r;
77866 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
77867 if (!vcpu_align)
77868 vcpu_align = __alignof__(struct kvm_vcpu);
77869 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
77870 - 0, NULL);
77871 + SLAB_USERCOPY, NULL);
77872 if (!kvm_vcpu_cache) {
77873 r = -ENOMEM;
77874 goto out_free_3;
77875 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
77876 if (r)
77877 goto out_free;
77878
77879 - kvm_chardev_ops.owner = module;
77880 - kvm_vm_fops.owner = module;
77881 - kvm_vcpu_fops.owner = module;
77882 + pax_open_kernel();
77883 + *(void **)&kvm_chardev_ops.owner = module;
77884 + *(void **)&kvm_vm_fops.owner = module;
77885 + *(void **)&kvm_vcpu_fops.owner = module;
77886 + pax_close_kernel();
77887
77888 r = misc_register(&kvm_dev);
77889 if (r) {