]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.3-201108232250.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.3-201108232250.patch
1 diff -urNp linux-3.0.3/arch/alpha/include/asm/elf.h linux-3.0.3/arch/alpha/include/asm/elf.h
2 --- linux-3.0.3/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.3/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.3/arch/alpha/include/asm/pgtable.h linux-3.0.3/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.3/arch/alpha/kernel/module.c linux-3.0.3/arch/alpha/kernel/module.c
40 --- linux-3.0.3/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.3/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.3/arch/alpha/kernel/osf_sys.c linux-3.0.3/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.3/arch/alpha/mm/fault.c linux-3.0.3/arch/alpha/mm/fault.c
86 --- linux-3.0.3/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.3/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.3/arch/arm/include/asm/elf.h linux-3.0.3/arch/arm/include/asm/elf.h
245 --- linux-3.0.3/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.3/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.3/arch/arm/include/asm/kmap_types.h linux-3.0.3/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.3/arch/arm/include/asm/uaccess.h linux-3.0.3/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.3/arch/arm/kernel/armksyms.c linux-3.0.3/arch/arm/kernel/armksyms.c
344 --- linux-3.0.3/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.3/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.3/arch/arm/kernel/process.c linux-3.0.3/arch/arm/kernel/process.c
358 --- linux-3.0.3/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.3/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.3/arch/arm/kernel/traps.c linux-3.0.3/arch/arm/kernel/traps.c
382 --- linux-3.0.3/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.3/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.3/arch/arm/lib/copy_from_user.S linux-3.0.3/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.3/arch/arm/lib/copy_to_user.S linux-3.0.3/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.3/arch/arm/lib/uaccess.S linux-3.0.3/arch/arm/lib/uaccess.S
456 --- linux-3.0.3/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.3/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.3/arch/arm/mm/fault.c linux-3.0.3/arch/arm/mm/fault.c
536 --- linux-3.0.3/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.3/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.3/arch/arm/mm/mmap.c linux-3.0.3/arch/arm/mm/mmap.c
587 --- linux-3.0.3/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.3/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.3/arch/avr32/include/asm/elf.h linux-3.0.3/arch/avr32/include/asm/elf.h
639 --- linux-3.0.3/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.3/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.3/arch/avr32/include/asm/kmap_types.h linux-3.0.3/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.3/arch/avr32/mm/fault.c linux-3.0.3/arch/avr32/mm/fault.c
671 --- linux-3.0.3/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.3/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.3/arch/frv/include/asm/kmap_types.h linux-3.0.3/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.3/arch/frv/mm/elf-fdpic.c linux-3.0.3/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.3/arch/ia64/include/asm/elf.h linux-3.0.3/arch/ia64/include/asm/elf.h
757 --- linux-3.0.3/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.3/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.3/arch/ia64/include/asm/pgtable.h linux-3.0.3/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.3/arch/ia64/include/asm/spinlock.h linux-3.0.3/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.3/arch/ia64/include/asm/uaccess.h linux-3.0.3/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.3/arch/ia64/kernel/module.c linux-3.0.3/arch/ia64/kernel/module.c
837 --- linux-3.0.3/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.3/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.3/arch/ia64/kernel/sys_ia64.c linux-3.0.3/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.3/arch/ia64/mm/fault.c linux-3.0.3/arch/ia64/mm/fault.c
975 --- linux-3.0.3/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.3/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.3/arch/ia64/mm/hugetlbpage.c linux-3.0.3/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.3/arch/ia64/mm/init.c linux-3.0.3/arch/ia64/mm/init.c
1039 --- linux-3.0.3/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.3/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.3/arch/m32r/lib/usercopy.c linux-3.0.3/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.3/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.3/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.3/arch/mips/include/asm/elf.h linux-3.0.3/arch/mips/include/asm/elf.h
1085 --- linux-3.0.3/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.3/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.3/arch/mips/include/asm/page.h linux-3.0.3/arch/mips/include/asm/page.h
1109 --- linux-3.0.3/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.3/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.3/arch/mips/include/asm/system.h linux-3.0.3/arch/mips/include/asm/system.h
1121 --- linux-3.0.3/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.3/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.3/arch/mips/kernel/process.c linux-3.0.3/arch/mips/kernel/process.c
1166 --- linux-3.0.3/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.3/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.3/arch/mips/mm/fault.c linux-3.0.3/arch/mips/mm/fault.c
1185 --- linux-3.0.3/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.3/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.3/arch/mips/mm/mmap.c linux-3.0.3/arch/mips/mm/mmap.c
1212 --- linux-3.0.3/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.3/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.3/arch/parisc/include/asm/elf.h linux-3.0.3/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.3/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.3/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.3/arch/parisc/include/asm/pgtable.h linux-3.0.3/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.3/arch/parisc/kernel/module.c linux-3.0.3/arch/parisc/kernel/module.c
1314 --- linux-3.0.3/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.3/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.3/arch/parisc/kernel/sys_parisc.c linux-3.0.3/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.3/arch/parisc/kernel/traps.c linux-3.0.3/arch/parisc/kernel/traps.c
1447 --- linux-3.0.3/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.3/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.3/arch/parisc/mm/fault.c linux-3.0.3/arch/parisc/mm/fault.c
1461 --- linux-3.0.3/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.3/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.3/arch/powerpc/include/asm/elf.h linux-3.0.3/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.3/arch/powerpc/include/asm/kmap_types.h linux-3.0.3/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.3/arch/powerpc/include/asm/mman.h linux-3.0.3/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page_64.h linux-3.0.3/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page.h linux-3.0.3/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.3/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.3/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pgtable.h linux-3.0.3/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.3/arch/powerpc/include/asm/reg.h linux-3.0.3/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.3/arch/powerpc/include/asm/system.h linux-3.0.3/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.3/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.3/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.3/arch/powerpc/include/asm/uaccess.h linux-3.0.3/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.3/arch/powerpc/kernel/module_32.c linux-3.0.3/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.3/arch/powerpc/kernel/module.c linux-3.0.3/arch/powerpc/kernel/module.c
2033 --- linux-3.0.3/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.3/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.3/arch/powerpc/kernel/process.c linux-3.0.3/arch/powerpc/kernel/process.c
2075 --- linux-3.0.3/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.3/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_32.c linux-3.0.3/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_64.c linux-3.0.3/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.3/arch/powerpc/kernel/traps.c linux-3.0.3/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.3/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.3/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.3/arch/powerpc/kernel/vdso.c linux-3.0.3/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.3/arch/powerpc/lib/usercopy_64.c linux-3.0.3/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.3/arch/powerpc/mm/fault.c linux-3.0.3/arch/powerpc/mm/fault.c
2278 --- linux-3.0.3/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.3/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.3/arch/powerpc/mm/mmap_64.c linux-3.0.3/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.3/arch/powerpc/mm/slice.c linux-3.0.3/arch/powerpc/mm/slice.c
2411 --- linux-3.0.3/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.3/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.3/arch/s390/include/asm/elf.h linux-3.0.3/arch/s390/include/asm/elf.h
2480 --- linux-3.0.3/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.3/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.3/arch/s390/include/asm/system.h linux-3.0.3/arch/s390/include/asm/system.h
2508 --- linux-3.0.3/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.3/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.3/arch/s390/include/asm/uaccess.h linux-3.0.3/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.3/arch/s390/kernel/module.c linux-3.0.3/arch/s390/kernel/module.c
2555 --- linux-3.0.3/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.3/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.3/arch/s390/kernel/process.c linux-3.0.3/arch/s390/kernel/process.c
2629 --- linux-3.0.3/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.3/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.3/arch/s390/kernel/setup.c linux-3.0.3/arch/s390/kernel/setup.c
2672 --- linux-3.0.3/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.3/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.3/arch/s390/mm/mmap.c linux-3.0.3/arch/s390/mm/mmap.c
2684 --- linux-3.0.3/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.3/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.3/arch/score/include/asm/system.h linux-3.0.3/arch/score/include/asm/system.h
2733 --- linux-3.0.3/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.3/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.3/arch/score/kernel/process.c linux-3.0.3/arch/score/kernel/process.c
2745 --- linux-3.0.3/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.3/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.3/arch/sh/mm/mmap.c linux-3.0.3/arch/sh/mm/mmap.c
2757 --- linux-3.0.3/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.3/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.3/arch/sparc/include/asm/atomic_64.h linux-3.0.3/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.3/arch/sparc/include/asm/cache.h linux-3.0.3/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.3/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.3/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_32.h linux-3.0.3/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_64.h linux-3.0.3/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtable_32.h linux-3.0.3/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.3/arch/sparc/include/asm/spinlock_64.h linux-3.0.3/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_32.h linux-3.0.3/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_64.h linux-3.0.3/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_32.h linux-3.0.3/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_64.h linux-3.0.3/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess.h linux-3.0.3/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.3/arch/sparc/kernel/Makefile linux-3.0.3/arch/sparc/kernel/Makefile
3366 --- linux-3.0.3/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.3/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.3/arch/sparc/kernel/process_32.c linux-3.0.3/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.3/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.3/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.3/arch/sparc/kernel/process_64.c linux-3.0.3/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.3/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.3/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_32.c linux-3.0.3/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_64.c linux-3.0.3/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.3/arch/sparc/kernel/unaligned_64.c linux-3.0.3/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.3/arch/sparc/lib/atomic_64.S linux-3.0.3/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.3/arch/sparc/lib/ksyms.c linux-3.0.3/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.3/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.3/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.3/arch/sparc/lib/Makefile linux-3.0.3/arch/sparc/lib/Makefile
4068 --- linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.3/arch/sparc/Makefile linux-3.0.3/arch/sparc/Makefile
4080 --- linux-3.0.3/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.3/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.3/arch/sparc/mm/fault_32.c linux-3.0.3/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.3/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.3/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.3/arch/sparc/mm/fault_64.c linux-3.0.3/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.3/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.3/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.3/arch/sparc/mm/hugetlbpage.c linux-3.0.3/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.3/arch/sparc/mm/init_32.c linux-3.0.3/arch/sparc/mm/init_32.c
4971 --- linux-3.0.3/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.3/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.3/arch/sparc/mm/Makefile linux-3.0.3/arch/sparc/mm/Makefile
5008 --- linux-3.0.3/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.3/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.3/arch/sparc/mm/srmmu.c linux-3.0.3/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.3/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.3/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.3/arch/um/include/asm/kmap_types.h linux-3.0.3/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.3/arch/um/include/asm/page.h linux-3.0.3/arch/um/include/asm/page.h
5048 --- linux-3.0.3/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.3/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.3/arch/um/kernel/process.c linux-3.0.3/arch/um/kernel/process.c
5061 --- linux-3.0.3/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.3/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.3/arch/um/sys-i386/syscalls.c linux-3.0.3/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.3/arch/x86/boot/bitops.h linux-3.0.3/arch/x86/boot/bitops.h
5112 --- linux-3.0.3/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.3/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.3/arch/x86/boot/boot.h linux-3.0.3/arch/x86/boot/boot.h
5133 --- linux-3.0.3/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.3/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_32.S linux-3.0.3/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_64.S linux-3.0.3/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.3/arch/x86/boot/compressed/Makefile linux-3.0.3/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.3/arch/x86/boot/compressed/misc.c linux-3.0.3/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.3/arch/x86/boot/compressed/relocs.c linux-3.0.3/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.3/arch/x86/boot/cpucheck.c linux-3.0.3/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.3/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.3/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.3/arch/x86/boot/header.S linux-3.0.3/arch/x86/boot/header.S
5533 --- linux-3.0.3/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.3/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.3/arch/x86/boot/Makefile linux-3.0.3/arch/x86/boot/Makefile
5545 --- linux-3.0.3/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.3/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.3/arch/x86/boot/memory.c linux-3.0.3/arch/x86/boot/memory.c
5558 --- linux-3.0.3/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.3/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.3/arch/x86/boot/video.c linux-3.0.3/arch/x86/boot/video.c
5570 --- linux-3.0.3/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.3/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.3/arch/x86/boot/video-vesa.c linux-3.0.3/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.3/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.3/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_aout.c linux-3.0.3/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599 + memset(&dump, 0, sizeof(dump));
5600 +
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604 diff -urNp linux-3.0.3/arch/x86/ia32/ia32entry.S linux-3.0.3/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-08-23 21:48:14.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619 + .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 + call pax_enter_kernel_user
5622 +#endif
5623 + .endm
5624 +
5625 + .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 + call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 + pushq %rax
5631 + call pax_randomize_kstack
5632 + popq %rax
5633 +#endif
5634 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5635 + call pax_erase_kstack
5636 +#endif
5637 + .endm
5638 +
5639 + .macro pax_erase_kstack
5640 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5641 + call pax_erase_kstack
5642 +#endif
5643 + .endm
5644 +
5645 /*
5646 * 32bit SYSENTER instruction entry.
5647 *
5648 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5649 CFI_REGISTER rsp,rbp
5650 SWAPGS_UNSAFE_STACK
5651 movq PER_CPU_VAR(kernel_stack), %rsp
5652 - addq $(KERNEL_STACK_OFFSET),%rsp
5653 + pax_enter_kernel_user
5654 /*
5655 * No need to follow this irqs on/off section: the syscall
5656 * disabled irqs, here we enable it straight after entry:
5657 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5658 CFI_REL_OFFSET rsp,0
5659 pushfq_cfi
5660 /*CFI_REL_OFFSET rflags,0*/
5661 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5662 + GET_THREAD_INFO(%r10)
5663 + movl TI_sysenter_return(%r10), %r10d
5664 CFI_REGISTER rip,r10
5665 pushq_cfi $__USER32_CS
5666 /*CFI_REL_OFFSET cs,0*/
5667 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5668 SAVE_ARGS 0,0,1
5669 /* no need to do an access_ok check here because rbp has been
5670 32bit zero extended */
5671 +
5672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5673 + mov $PAX_USER_SHADOW_BASE,%r10
5674 + add %r10,%rbp
5675 +#endif
5676 +
5677 1: movl (%rbp),%ebp
5678 .section __ex_table,"a"
5679 .quad 1b,ia32_badarg
5680 @@ -168,6 +202,7 @@ sysenter_dispatch:
5681 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5682 jnz sysexit_audit
5683 sysexit_from_sys_call:
5684 + pax_exit_kernel_user
5685 andl $~TS_COMPAT,TI_status(%r10)
5686 /* clear IF, that popfq doesn't enable interrupts early */
5687 andl $~0x200,EFLAGS-R11(%rsp)
5688 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5689 movl %eax,%esi /* 2nd arg: syscall number */
5690 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5691 call audit_syscall_entry
5692 +
5693 + pax_erase_kstack
5694 +
5695 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5696 cmpq $(IA32_NR_syscalls-1),%rax
5697 ja ia32_badsys
5698 @@ -246,6 +284,9 @@ sysenter_tracesys:
5699 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5700 movq %rsp,%rdi /* &pt_regs -> arg1 */
5701 call syscall_trace_enter
5702 +
5703 + pax_erase_kstack
5704 +
5705 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5706 RESTORE_REST
5707 cmpq $(IA32_NR_syscalls-1),%rax
5708 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5709 ENTRY(ia32_cstar_target)
5710 CFI_STARTPROC32 simple
5711 CFI_SIGNAL_FRAME
5712 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5713 + CFI_DEF_CFA rsp,0
5714 CFI_REGISTER rip,rcx
5715 /*CFI_REGISTER rflags,r11*/
5716 SWAPGS_UNSAFE_STACK
5717 movl %esp,%r8d
5718 CFI_REGISTER rsp,r8
5719 movq PER_CPU_VAR(kernel_stack),%rsp
5720 +
5721 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5722 + pax_enter_kernel_user
5723 +#endif
5724 +
5725 /*
5726 * No need to follow this irqs on/off section: the syscall
5727 * disabled irqs and here we enable it straight after entry:
5728 */
5729 ENABLE_INTERRUPTS(CLBR_NONE)
5730 - SAVE_ARGS 8,1,1
5731 + SAVE_ARGS 8*6,1,1
5732 movl %eax,%eax /* zero extension */
5733 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5734 movq %rcx,RIP-ARGOFFSET(%rsp)
5735 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5736 /* no need to do an access_ok check here because r8 has been
5737 32bit zero extended */
5738 /* hardware stack frame is complete now */
5739 +
5740 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5741 + mov $PAX_USER_SHADOW_BASE,%r10
5742 + add %r10,%r8
5743 +#endif
5744 +
5745 1: movl (%r8),%r9d
5746 .section __ex_table,"a"
5747 .quad 1b,ia32_badarg
5748 @@ -327,6 +379,7 @@ cstar_dispatch:
5749 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5750 jnz sysretl_audit
5751 sysretl_from_sys_call:
5752 + pax_exit_kernel_user
5753 andl $~TS_COMPAT,TI_status(%r10)
5754 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5755 movl RIP-ARGOFFSET(%rsp),%ecx
5756 @@ -364,6 +417,9 @@ cstar_tracesys:
5757 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5758 movq %rsp,%rdi /* &pt_regs -> arg1 */
5759 call syscall_trace_enter
5760 +
5761 + pax_erase_kstack
5762 +
5763 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5764 RESTORE_REST
5765 xchgl %ebp,%r9d
5766 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5767 CFI_REL_OFFSET rip,RIP-RIP
5768 PARAVIRT_ADJUST_EXCEPTION_FRAME
5769 SWAPGS
5770 + pax_enter_kernel_user
5771 /*
5772 * No need to follow this irqs on/off section: the syscall
5773 * disabled irqs and here we enable it straight after entry:
5774 @@ -441,6 +498,9 @@ ia32_tracesys:
5775 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5776 movq %rsp,%rdi /* &pt_regs -> arg1 */
5777 call syscall_trace_enter
5778 +
5779 + pax_erase_kstack
5780 +
5781 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5782 RESTORE_REST
5783 cmpq $(IA32_NR_syscalls-1),%rax
5784 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_signal.c linux-3.0.3/arch/x86/ia32/ia32_signal.c
5785 --- linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5786 +++ linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5787 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5788 sp -= frame_size;
5789 /* Align the stack pointer according to the i386 ABI,
5790 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5791 - sp = ((sp + 4) & -16ul) - 4;
5792 + sp = ((sp - 12) & -16ul) - 4;
5793 return (void __user *) sp;
5794 }
5795
5796 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5797 * These are actually not used anymore, but left because some
5798 * gdb versions depend on them as a marker.
5799 */
5800 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5801 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5802 } put_user_catch(err);
5803
5804 if (err)
5805 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5806 0xb8,
5807 __NR_ia32_rt_sigreturn,
5808 0x80cd,
5809 - 0,
5810 + 0
5811 };
5812
5813 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5814 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5815
5816 if (ka->sa.sa_flags & SA_RESTORER)
5817 restorer = ka->sa.sa_restorer;
5818 + else if (current->mm->context.vdso)
5819 + /* Return stub is in 32bit vsyscall page */
5820 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5821 else
5822 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5823 - rt_sigreturn);
5824 + restorer = &frame->retcode;
5825 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5826
5827 /*
5828 * Not actually used anymore, but left because some gdb
5829 * versions need it.
5830 */
5831 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5832 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5833 } put_user_catch(err);
5834
5835 if (err)
5836 diff -urNp linux-3.0.3/arch/x86/include/asm/alternative.h linux-3.0.3/arch/x86/include/asm/alternative.h
5837 --- linux-3.0.3/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5838 +++ linux-3.0.3/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5839 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5840 ".section .discard,\"aw\",@progbits\n" \
5841 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5842 ".previous\n" \
5843 - ".section .altinstr_replacement, \"ax\"\n" \
5844 + ".section .altinstr_replacement, \"a\"\n" \
5845 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5846 ".previous"
5847
5848 diff -urNp linux-3.0.3/arch/x86/include/asm/apic.h linux-3.0.3/arch/x86/include/asm/apic.h
5849 --- linux-3.0.3/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5850 +++ linux-3.0.3/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5851 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5852
5853 #ifdef CONFIG_X86_LOCAL_APIC
5854
5855 -extern unsigned int apic_verbosity;
5856 +extern int apic_verbosity;
5857 extern int local_apic_timer_c2_ok;
5858
5859 extern int disable_apic;
5860 diff -urNp linux-3.0.3/arch/x86/include/asm/apm.h linux-3.0.3/arch/x86/include/asm/apm.h
5861 --- linux-3.0.3/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5862 +++ linux-3.0.3/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5863 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5864 __asm__ __volatile__(APM_DO_ZERO_SEGS
5865 "pushl %%edi\n\t"
5866 "pushl %%ebp\n\t"
5867 - "lcall *%%cs:apm_bios_entry\n\t"
5868 + "lcall *%%ss:apm_bios_entry\n\t"
5869 "setc %%al\n\t"
5870 "popl %%ebp\n\t"
5871 "popl %%edi\n\t"
5872 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5873 __asm__ __volatile__(APM_DO_ZERO_SEGS
5874 "pushl %%edi\n\t"
5875 "pushl %%ebp\n\t"
5876 - "lcall *%%cs:apm_bios_entry\n\t"
5877 + "lcall *%%ss:apm_bios_entry\n\t"
5878 "setc %%bl\n\t"
5879 "popl %%ebp\n\t"
5880 "popl %%edi\n\t"
5881 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_32.h linux-3.0.3/arch/x86/include/asm/atomic64_32.h
5882 --- linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5883 +++ linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5884 @@ -12,6 +12,14 @@ typedef struct {
5885 u64 __aligned(8) counter;
5886 } atomic64_t;
5887
5888 +#ifdef CONFIG_PAX_REFCOUNT
5889 +typedef struct {
5890 + u64 __aligned(8) counter;
5891 +} atomic64_unchecked_t;
5892 +#else
5893 +typedef atomic64_t atomic64_unchecked_t;
5894 +#endif
5895 +
5896 #define ATOMIC64_INIT(val) { (val) }
5897
5898 #ifdef CONFIG_X86_CMPXCHG64
5899 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5900 }
5901
5902 /**
5903 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5904 + * @p: pointer to type atomic64_unchecked_t
5905 + * @o: expected value
5906 + * @n: new value
5907 + *
5908 + * Atomically sets @v to @n if it was equal to @o and returns
5909 + * the old value.
5910 + */
5911 +
5912 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5913 +{
5914 + return cmpxchg64(&v->counter, o, n);
5915 +}
5916 +
5917 +/**
5918 * atomic64_xchg - xchg atomic64 variable
5919 * @v: pointer to type atomic64_t
5920 * @n: value to assign
5921 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5922 }
5923
5924 /**
5925 + * atomic64_set_unchecked - set atomic64 variable
5926 + * @v: pointer to type atomic64_unchecked_t
5927 + * @n: value to assign
5928 + *
5929 + * Atomically sets the value of @v to @n.
5930 + */
5931 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5932 +{
5933 + unsigned high = (unsigned)(i >> 32);
5934 + unsigned low = (unsigned)i;
5935 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5936 + : "+b" (low), "+c" (high)
5937 + : "S" (v)
5938 + : "eax", "edx", "memory"
5939 + );
5940 +}
5941 +
5942 +/**
5943 * atomic64_read - read atomic64 variable
5944 * @v: pointer to type atomic64_t
5945 *
5946 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5947 }
5948
5949 /**
5950 + * atomic64_read_unchecked - read atomic64 variable
5951 + * @v: pointer to type atomic64_unchecked_t
5952 + *
5953 + * Atomically reads the value of @v and returns it.
5954 + */
5955 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5956 +{
5957 + long long r;
5958 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5959 + : "=A" (r), "+c" (v)
5960 + : : "memory"
5961 + );
5962 + return r;
5963 + }
5964 +
5965 +/**
5966 * atomic64_add_return - add and return
5967 * @i: integer value to add
5968 * @v: pointer to type atomic64_t
5969 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5970 return i;
5971 }
5972
5973 +/**
5974 + * atomic64_add_return_unchecked - add and return
5975 + * @i: integer value to add
5976 + * @v: pointer to type atomic64_unchecked_t
5977 + *
5978 + * Atomically adds @i to @v and returns @i + *@v
5979 + */
5980 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5981 +{
5982 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5983 + : "+A" (i), "+c" (v)
5984 + : : "memory"
5985 + );
5986 + return i;
5987 +}
5988 +
5989 /*
5990 * Other variants with different arithmetic operators:
5991 */
5992 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5993 return a;
5994 }
5995
5996 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5997 +{
5998 + long long a;
5999 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6000 + : "=A" (a)
6001 + : "S" (v)
6002 + : "memory", "ecx"
6003 + );
6004 + return a;
6005 +}
6006 +
6007 static inline long long atomic64_dec_return(atomic64_t *v)
6008 {
6009 long long a;
6010 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6011 }
6012
6013 /**
6014 + * atomic64_add_unchecked - add integer to atomic64 variable
6015 + * @i: integer value to add
6016 + * @v: pointer to type atomic64_unchecked_t
6017 + *
6018 + * Atomically adds @i to @v.
6019 + */
6020 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6021 +{
6022 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6023 + : "+A" (i), "+c" (v)
6024 + : : "memory"
6025 + );
6026 + return i;
6027 +}
6028 +
6029 +/**
6030 * atomic64_sub - subtract the atomic64 variable
6031 * @i: integer value to subtract
6032 * @v: pointer to type atomic64_t
6033 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_64.h linux-3.0.3/arch/x86/include/asm/atomic64_64.h
6034 --- linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6035 +++ linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6036 @@ -18,7 +18,19 @@
6037 */
6038 static inline long atomic64_read(const atomic64_t *v)
6039 {
6040 - return (*(volatile long *)&(v)->counter);
6041 + return (*(volatile const long *)&(v)->counter);
6042 +}
6043 +
6044 +/**
6045 + * atomic64_read_unchecked - read atomic64 variable
6046 + * @v: pointer of type atomic64_unchecked_t
6047 + *
6048 + * Atomically reads the value of @v.
6049 + * Doesn't imply a read memory barrier.
6050 + */
6051 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6052 +{
6053 + return (*(volatile const long *)&(v)->counter);
6054 }
6055
6056 /**
6057 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6058 }
6059
6060 /**
6061 + * atomic64_set_unchecked - set atomic64 variable
6062 + * @v: pointer to type atomic64_unchecked_t
6063 + * @i: required value
6064 + *
6065 + * Atomically sets the value of @v to @i.
6066 + */
6067 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6068 +{
6069 + v->counter = i;
6070 +}
6071 +
6072 +/**
6073 * atomic64_add - add integer to atomic64 variable
6074 * @i: integer value to add
6075 * @v: pointer to type atomic64_t
6076 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6077 */
6078 static inline void atomic64_add(long i, atomic64_t *v)
6079 {
6080 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6081 +
6082 +#ifdef CONFIG_PAX_REFCOUNT
6083 + "jno 0f\n"
6084 + LOCK_PREFIX "subq %1,%0\n"
6085 + "int $4\n0:\n"
6086 + _ASM_EXTABLE(0b, 0b)
6087 +#endif
6088 +
6089 + : "=m" (v->counter)
6090 + : "er" (i), "m" (v->counter));
6091 +}
6092 +
6093 +/**
6094 + * atomic64_add_unchecked - add integer to atomic64 variable
6095 + * @i: integer value to add
6096 + * @v: pointer to type atomic64_unchecked_t
6097 + *
6098 + * Atomically adds @i to @v.
6099 + */
6100 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6101 +{
6102 asm volatile(LOCK_PREFIX "addq %1,%0"
6103 : "=m" (v->counter)
6104 : "er" (i), "m" (v->counter));
6105 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6106 */
6107 static inline void atomic64_sub(long i, atomic64_t *v)
6108 {
6109 - asm volatile(LOCK_PREFIX "subq %1,%0"
6110 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6111 +
6112 +#ifdef CONFIG_PAX_REFCOUNT
6113 + "jno 0f\n"
6114 + LOCK_PREFIX "addq %1,%0\n"
6115 + "int $4\n0:\n"
6116 + _ASM_EXTABLE(0b, 0b)
6117 +#endif
6118 +
6119 + : "=m" (v->counter)
6120 + : "er" (i), "m" (v->counter));
6121 +}
6122 +
6123 +/**
6124 + * atomic64_sub_unchecked - subtract the atomic64 variable
6125 + * @i: integer value to subtract
6126 + * @v: pointer to type atomic64_unchecked_t
6127 + *
6128 + * Atomically subtracts @i from @v.
6129 + */
6130 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6131 +{
6132 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6133 : "=m" (v->counter)
6134 : "er" (i), "m" (v->counter));
6135 }
6136 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6137 {
6138 unsigned char c;
6139
6140 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6141 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6142 +
6143 +#ifdef CONFIG_PAX_REFCOUNT
6144 + "jno 0f\n"
6145 + LOCK_PREFIX "addq %2,%0\n"
6146 + "int $4\n0:\n"
6147 + _ASM_EXTABLE(0b, 0b)
6148 +#endif
6149 +
6150 + "sete %1\n"
6151 : "=m" (v->counter), "=qm" (c)
6152 : "er" (i), "m" (v->counter) : "memory");
6153 return c;
6154 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6155 */
6156 static inline void atomic64_inc(atomic64_t *v)
6157 {
6158 + asm volatile(LOCK_PREFIX "incq %0\n"
6159 +
6160 +#ifdef CONFIG_PAX_REFCOUNT
6161 + "jno 0f\n"
6162 + LOCK_PREFIX "decq %0\n"
6163 + "int $4\n0:\n"
6164 + _ASM_EXTABLE(0b, 0b)
6165 +#endif
6166 +
6167 + : "=m" (v->counter)
6168 + : "m" (v->counter));
6169 +}
6170 +
6171 +/**
6172 + * atomic64_inc_unchecked - increment atomic64 variable
6173 + * @v: pointer to type atomic64_unchecked_t
6174 + *
6175 + * Atomically increments @v by 1.
6176 + */
6177 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6178 +{
6179 asm volatile(LOCK_PREFIX "incq %0"
6180 : "=m" (v->counter)
6181 : "m" (v->counter));
6182 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6183 */
6184 static inline void atomic64_dec(atomic64_t *v)
6185 {
6186 - asm volatile(LOCK_PREFIX "decq %0"
6187 + asm volatile(LOCK_PREFIX "decq %0\n"
6188 +
6189 +#ifdef CONFIG_PAX_REFCOUNT
6190 + "jno 0f\n"
6191 + LOCK_PREFIX "incq %0\n"
6192 + "int $4\n0:\n"
6193 + _ASM_EXTABLE(0b, 0b)
6194 +#endif
6195 +
6196 + : "=m" (v->counter)
6197 + : "m" (v->counter));
6198 +}
6199 +
6200 +/**
6201 + * atomic64_dec_unchecked - decrement atomic64 variable
6202 + * @v: pointer to type atomic64_unchecked_t
6203 + *
6204 + * Atomically decrements @v by 1.
6205 + */
6206 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6207 +{
6208 + asm volatile(LOCK_PREFIX "decq %0\n"
6209 : "=m" (v->counter)
6210 : "m" (v->counter));
6211 }
6212 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6213 {
6214 unsigned char c;
6215
6216 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6217 + asm volatile(LOCK_PREFIX "decq %0\n"
6218 +
6219 +#ifdef CONFIG_PAX_REFCOUNT
6220 + "jno 0f\n"
6221 + LOCK_PREFIX "incq %0\n"
6222 + "int $4\n0:\n"
6223 + _ASM_EXTABLE(0b, 0b)
6224 +#endif
6225 +
6226 + "sete %1\n"
6227 : "=m" (v->counter), "=qm" (c)
6228 : "m" (v->counter) : "memory");
6229 return c != 0;
6230 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6231 {
6232 unsigned char c;
6233
6234 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6235 + asm volatile(LOCK_PREFIX "incq %0\n"
6236 +
6237 +#ifdef CONFIG_PAX_REFCOUNT
6238 + "jno 0f\n"
6239 + LOCK_PREFIX "decq %0\n"
6240 + "int $4\n0:\n"
6241 + _ASM_EXTABLE(0b, 0b)
6242 +#endif
6243 +
6244 + "sete %1\n"
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6247 return c != 0;
6248 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6249 {
6250 unsigned char c;
6251
6252 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6253 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6254 +
6255 +#ifdef CONFIG_PAX_REFCOUNT
6256 + "jno 0f\n"
6257 + LOCK_PREFIX "subq %2,%0\n"
6258 + "int $4\n0:\n"
6259 + _ASM_EXTABLE(0b, 0b)
6260 +#endif
6261 +
6262 + "sets %1\n"
6263 : "=m" (v->counter), "=qm" (c)
6264 : "er" (i), "m" (v->counter) : "memory");
6265 return c;
6266 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6267 static inline long atomic64_add_return(long i, atomic64_t *v)
6268 {
6269 long __i = i;
6270 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6271 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6272 +
6273 +#ifdef CONFIG_PAX_REFCOUNT
6274 + "jno 0f\n"
6275 + "movq %0, %1\n"
6276 + "int $4\n0:\n"
6277 + _ASM_EXTABLE(0b, 0b)
6278 +#endif
6279 +
6280 + : "+r" (i), "+m" (v->counter)
6281 + : : "memory");
6282 + return i + __i;
6283 +}
6284 +
6285 +/**
6286 + * atomic64_add_return_unchecked - add integer and return
6287 + * @i: integer value to add
6288 + * @v: pointer to type atomic64_unchecked_t
6289 + *
6290 + * Atomically adds @i to @v and returns @i + @v
6291 + */
6292 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6293 +{
6294 + long __i = i;
6295 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6296 : "+r" (i), "+m" (v->counter)
6297 : : "memory");
6298 return i + __i;
6299 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6300 }
6301
6302 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6303 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6304 +{
6305 + return atomic64_add_return_unchecked(1, v);
6306 +}
6307 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6308
6309 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6310 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6311 return cmpxchg(&v->counter, old, new);
6312 }
6313
6314 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6315 +{
6316 + return cmpxchg(&v->counter, old, new);
6317 +}
6318 +
6319 static inline long atomic64_xchg(atomic64_t *v, long new)
6320 {
6321 return xchg(&v->counter, new);
6322 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6323 */
6324 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6325 {
6326 - long c, old;
6327 + long c, old, new;
6328 c = atomic64_read(v);
6329 for (;;) {
6330 - if (unlikely(c == (u)))
6331 + if (unlikely(c == u))
6332 break;
6333 - old = atomic64_cmpxchg((v), c, c + (a));
6334 +
6335 + asm volatile("add %2,%0\n"
6336 +
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + "jno 0f\n"
6339 + "sub %2,%0\n"
6340 + "int $4\n0:\n"
6341 + _ASM_EXTABLE(0b, 0b)
6342 +#endif
6343 +
6344 + : "=r" (new)
6345 + : "0" (c), "ir" (a));
6346 +
6347 + old = atomic64_cmpxchg(v, c, new);
6348 if (likely(old == c))
6349 break;
6350 c = old;
6351 }
6352 - return c != (u);
6353 + return c != u;
6354 }
6355
6356 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6357 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic.h linux-3.0.3/arch/x86/include/asm/atomic.h
6358 --- linux-3.0.3/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6359 +++ linux-3.0.3/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6360 @@ -22,7 +22,18 @@
6361 */
6362 static inline int atomic_read(const atomic_t *v)
6363 {
6364 - return (*(volatile int *)&(v)->counter);
6365 + return (*(volatile const int *)&(v)->counter);
6366 +}
6367 +
6368 +/**
6369 + * atomic_read_unchecked - read atomic variable
6370 + * @v: pointer of type atomic_unchecked_t
6371 + *
6372 + * Atomically reads the value of @v.
6373 + */
6374 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6375 +{
6376 + return (*(volatile const int *)&(v)->counter);
6377 }
6378
6379 /**
6380 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6381 }
6382
6383 /**
6384 + * atomic_set_unchecked - set atomic variable
6385 + * @v: pointer of type atomic_unchecked_t
6386 + * @i: required value
6387 + *
6388 + * Atomically sets the value of @v to @i.
6389 + */
6390 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6391 +{
6392 + v->counter = i;
6393 +}
6394 +
6395 +/**
6396 * atomic_add - add integer to atomic variable
6397 * @i: integer value to add
6398 * @v: pointer of type atomic_t
6399 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6400 */
6401 static inline void atomic_add(int i, atomic_t *v)
6402 {
6403 - asm volatile(LOCK_PREFIX "addl %1,%0"
6404 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + "jno 0f\n"
6408 + LOCK_PREFIX "subl %1,%0\n"
6409 + "int $4\n0:\n"
6410 + _ASM_EXTABLE(0b, 0b)
6411 +#endif
6412 +
6413 + : "+m" (v->counter)
6414 + : "ir" (i));
6415 +}
6416 +
6417 +/**
6418 + * atomic_add_unchecked - add integer to atomic variable
6419 + * @i: integer value to add
6420 + * @v: pointer of type atomic_unchecked_t
6421 + *
6422 + * Atomically adds @i to @v.
6423 + */
6424 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6425 +{
6426 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6427 : "+m" (v->counter)
6428 : "ir" (i));
6429 }
6430 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6431 */
6432 static inline void atomic_sub(int i, atomic_t *v)
6433 {
6434 - asm volatile(LOCK_PREFIX "subl %1,%0"
6435 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6436 +
6437 +#ifdef CONFIG_PAX_REFCOUNT
6438 + "jno 0f\n"
6439 + LOCK_PREFIX "addl %1,%0\n"
6440 + "int $4\n0:\n"
6441 + _ASM_EXTABLE(0b, 0b)
6442 +#endif
6443 +
6444 + : "+m" (v->counter)
6445 + : "ir" (i));
6446 +}
6447 +
6448 +/**
6449 + * atomic_sub_unchecked - subtract integer from atomic variable
6450 + * @i: integer value to subtract
6451 + * @v: pointer of type atomic_unchecked_t
6452 + *
6453 + * Atomically subtracts @i from @v.
6454 + */
6455 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6456 +{
6457 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6458 : "+m" (v->counter)
6459 : "ir" (i));
6460 }
6461 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6462 {
6463 unsigned char c;
6464
6465 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6466 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6467 +
6468 +#ifdef CONFIG_PAX_REFCOUNT
6469 + "jno 0f\n"
6470 + LOCK_PREFIX "addl %2,%0\n"
6471 + "int $4\n0:\n"
6472 + _ASM_EXTABLE(0b, 0b)
6473 +#endif
6474 +
6475 + "sete %1\n"
6476 : "+m" (v->counter), "=qm" (c)
6477 : "ir" (i) : "memory");
6478 return c;
6479 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6480 */
6481 static inline void atomic_inc(atomic_t *v)
6482 {
6483 - asm volatile(LOCK_PREFIX "incl %0"
6484 + asm volatile(LOCK_PREFIX "incl %0\n"
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + "jno 0f\n"
6488 + LOCK_PREFIX "decl %0\n"
6489 + "int $4\n0:\n"
6490 + _ASM_EXTABLE(0b, 0b)
6491 +#endif
6492 +
6493 + : "+m" (v->counter));
6494 +}
6495 +
6496 +/**
6497 + * atomic_inc_unchecked - increment atomic variable
6498 + * @v: pointer of type atomic_unchecked_t
6499 + *
6500 + * Atomically increments @v by 1.
6501 + */
6502 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6503 +{
6504 + asm volatile(LOCK_PREFIX "incl %0\n"
6505 : "+m" (v->counter));
6506 }
6507
6508 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6509 */
6510 static inline void atomic_dec(atomic_t *v)
6511 {
6512 - asm volatile(LOCK_PREFIX "decl %0"
6513 + asm volatile(LOCK_PREFIX "decl %0\n"
6514 +
6515 +#ifdef CONFIG_PAX_REFCOUNT
6516 + "jno 0f\n"
6517 + LOCK_PREFIX "incl %0\n"
6518 + "int $4\n0:\n"
6519 + _ASM_EXTABLE(0b, 0b)
6520 +#endif
6521 +
6522 + : "+m" (v->counter));
6523 +}
6524 +
6525 +/**
6526 + * atomic_dec_unchecked - decrement atomic variable
6527 + * @v: pointer of type atomic_unchecked_t
6528 + *
6529 + * Atomically decrements @v by 1.
6530 + */
6531 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6532 +{
6533 + asm volatile(LOCK_PREFIX "decl %0\n"
6534 : "+m" (v->counter));
6535 }
6536
6537 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6538 {
6539 unsigned char c;
6540
6541 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6542 + asm volatile(LOCK_PREFIX "decl %0\n"
6543 +
6544 +#ifdef CONFIG_PAX_REFCOUNT
6545 + "jno 0f\n"
6546 + LOCK_PREFIX "incl %0\n"
6547 + "int $4\n0:\n"
6548 + _ASM_EXTABLE(0b, 0b)
6549 +#endif
6550 +
6551 + "sete %1\n"
6552 : "+m" (v->counter), "=qm" (c)
6553 : : "memory");
6554 return c != 0;
6555 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6556 {
6557 unsigned char c;
6558
6559 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6560 + asm volatile(LOCK_PREFIX "incl %0\n"
6561 +
6562 +#ifdef CONFIG_PAX_REFCOUNT
6563 + "jno 0f\n"
6564 + LOCK_PREFIX "decl %0\n"
6565 + "int $4\n0:\n"
6566 + _ASM_EXTABLE(0b, 0b)
6567 +#endif
6568 +
6569 + "sete %1\n"
6570 + : "+m" (v->counter), "=qm" (c)
6571 + : : "memory");
6572 + return c != 0;
6573 +}
6574 +
6575 +/**
6576 + * atomic_inc_and_test_unchecked - increment and test
6577 + * @v: pointer of type atomic_unchecked_t
6578 + *
6579 + * Atomically increments @v by 1
6580 + * and returns true if the result is zero, or false for all
6581 + * other cases.
6582 + */
6583 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6584 +{
6585 + unsigned char c;
6586 +
6587 + asm volatile(LOCK_PREFIX "incl %0\n"
6588 + "sete %1\n"
6589 : "+m" (v->counter), "=qm" (c)
6590 : : "memory");
6591 return c != 0;
6592 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6593 {
6594 unsigned char c;
6595
6596 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6597 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6598 +
6599 +#ifdef CONFIG_PAX_REFCOUNT
6600 + "jno 0f\n"
6601 + LOCK_PREFIX "subl %2,%0\n"
6602 + "int $4\n0:\n"
6603 + _ASM_EXTABLE(0b, 0b)
6604 +#endif
6605 +
6606 + "sets %1\n"
6607 : "+m" (v->counter), "=qm" (c)
6608 : "ir" (i) : "memory");
6609 return c;
6610 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6611 #endif
6612 /* Modern 486+ processor */
6613 __i = i;
6614 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6615 +
6616 +#ifdef CONFIG_PAX_REFCOUNT
6617 + "jno 0f\n"
6618 + "movl %0, %1\n"
6619 + "int $4\n0:\n"
6620 + _ASM_EXTABLE(0b, 0b)
6621 +#endif
6622 +
6623 + : "+r" (i), "+m" (v->counter)
6624 + : : "memory");
6625 + return i + __i;
6626 +
6627 +#ifdef CONFIG_M386
6628 +no_xadd: /* Legacy 386 processor */
6629 + local_irq_save(flags);
6630 + __i = atomic_read(v);
6631 + atomic_set(v, i + __i);
6632 + local_irq_restore(flags);
6633 + return i + __i;
6634 +#endif
6635 +}
6636 +
6637 +/**
6638 + * atomic_add_return_unchecked - add integer and return
6639 + * @v: pointer of type atomic_unchecked_t
6640 + * @i: integer value to add
6641 + *
6642 + * Atomically adds @i to @v and returns @i + @v
6643 + */
6644 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6645 +{
6646 + int __i;
6647 +#ifdef CONFIG_M386
6648 + unsigned long flags;
6649 + if (unlikely(boot_cpu_data.x86 <= 3))
6650 + goto no_xadd;
6651 +#endif
6652 + /* Modern 486+ processor */
6653 + __i = i;
6654 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6655 : "+r" (i), "+m" (v->counter)
6656 : : "memory");
6657 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6658 }
6659
6660 #define atomic_inc_return(v) (atomic_add_return(1, v))
6661 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6662 +{
6663 + return atomic_add_return_unchecked(1, v);
6664 +}
6665 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6666
6667 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6668 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6669 return cmpxchg(&v->counter, old, new);
6670 }
6671
6672 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6673 +{
6674 + return cmpxchg(&v->counter, old, new);
6675 +}
6676 +
6677 static inline int atomic_xchg(atomic_t *v, int new)
6678 {
6679 return xchg(&v->counter, new);
6680 }
6681
6682 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6683 +{
6684 + return xchg(&v->counter, new);
6685 +}
6686 +
6687 /**
6688 * atomic_add_unless - add unless the number is already a given value
6689 * @v: pointer of type atomic_t
6690 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6691 */
6692 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6693 {
6694 - int c, old;
6695 + int c, old, new;
6696 c = atomic_read(v);
6697 for (;;) {
6698 - if (unlikely(c == (u)))
6699 + if (unlikely(c == u))
6700 break;
6701 - old = atomic_cmpxchg((v), c, c + (a));
6702 +
6703 + asm volatile("addl %2,%0\n"
6704 +
6705 +#ifdef CONFIG_PAX_REFCOUNT
6706 + "jno 0f\n"
6707 + "subl %2,%0\n"
6708 + "int $4\n0:\n"
6709 + _ASM_EXTABLE(0b, 0b)
6710 +#endif
6711 +
6712 + : "=r" (new)
6713 + : "0" (c), "ir" (a));
6714 +
6715 + old = atomic_cmpxchg(v, c, new);
6716 if (likely(old == c))
6717 break;
6718 c = old;
6719 }
6720 - return c != (u);
6721 + return c != u;
6722 }
6723
6724 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6725
6726 +/**
6727 + * atomic_inc_not_zero_hint - increment if not null
6728 + * @v: pointer of type atomic_t
6729 + * @hint: probable value of the atomic before the increment
6730 + *
6731 + * This version of atomic_inc_not_zero() gives a hint of probable
6732 + * value of the atomic. This helps processor to not read the memory
6733 + * before doing the atomic read/modify/write cycle, lowering
6734 + * number of bus transactions on some arches.
6735 + *
6736 + * Returns: 0 if increment was not done, 1 otherwise.
6737 + */
6738 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6739 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6740 +{
6741 + int val, c = hint, new;
6742 +
6743 + /* sanity test, should be removed by compiler if hint is a constant */
6744 + if (!hint)
6745 + return atomic_inc_not_zero(v);
6746 +
6747 + do {
6748 + asm volatile("incl %0\n"
6749 +
6750 +#ifdef CONFIG_PAX_REFCOUNT
6751 + "jno 0f\n"
6752 + "decl %0\n"
6753 + "int $4\n0:\n"
6754 + _ASM_EXTABLE(0b, 0b)
6755 +#endif
6756 +
6757 + : "=r" (new)
6758 + : "0" (c));
6759 +
6760 + val = atomic_cmpxchg(v, c, new);
6761 + if (val == c)
6762 + return 1;
6763 + c = val;
6764 + } while (c);
6765 +
6766 + return 0;
6767 +}
6768 +
6769 /*
6770 * atomic_dec_if_positive - decrement by 1 if old value positive
6771 * @v: pointer of type atomic_t
6772 diff -urNp linux-3.0.3/arch/x86/include/asm/bitops.h linux-3.0.3/arch/x86/include/asm/bitops.h
6773 --- linux-3.0.3/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6774 +++ linux-3.0.3/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6775 @@ -38,7 +38,7 @@
6776 * a mask operation on a byte.
6777 */
6778 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6779 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6780 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6781 #define CONST_MASK(nr) (1 << ((nr) & 7))
6782
6783 /**
6784 diff -urNp linux-3.0.3/arch/x86/include/asm/boot.h linux-3.0.3/arch/x86/include/asm/boot.h
6785 --- linux-3.0.3/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6786 +++ linux-3.0.3/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6787 @@ -11,10 +11,15 @@
6788 #include <asm/pgtable_types.h>
6789
6790 /* Physical address where kernel should be loaded. */
6791 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6793 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6794 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6795
6796 +#ifndef __ASSEMBLY__
6797 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6798 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6799 +#endif
6800 +
6801 /* Minimum kernel alignment, as a power of two */
6802 #ifdef CONFIG_X86_64
6803 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6804 diff -urNp linux-3.0.3/arch/x86/include/asm/cacheflush.h linux-3.0.3/arch/x86/include/asm/cacheflush.h
6805 --- linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6806 +++ linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6807 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6808 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6809
6810 if (pg_flags == _PGMT_DEFAULT)
6811 - return -1;
6812 + return ~0UL;
6813 else if (pg_flags == _PGMT_WC)
6814 return _PAGE_CACHE_WC;
6815 else if (pg_flags == _PGMT_UC_MINUS)
6816 diff -urNp linux-3.0.3/arch/x86/include/asm/cache.h linux-3.0.3/arch/x86/include/asm/cache.h
6817 --- linux-3.0.3/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6818 +++ linux-3.0.3/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6819 @@ -5,12 +5,13 @@
6820
6821 /* L1 cache line size */
6822 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6823 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6824 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6825
6826 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6827 +#define __read_only __attribute__((__section__(".data..read_only")))
6828
6829 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6830 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6831 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6832
6833 #ifdef CONFIG_X86_VSMP
6834 #ifdef CONFIG_SMP
6835 diff -urNp linux-3.0.3/arch/x86/include/asm/checksum_32.h linux-3.0.3/arch/x86/include/asm/checksum_32.h
6836 --- linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6837 +++ linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6838 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6839 int len, __wsum sum,
6840 int *src_err_ptr, int *dst_err_ptr);
6841
6842 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6843 + int len, __wsum sum,
6844 + int *src_err_ptr, int *dst_err_ptr);
6845 +
6846 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6847 + int len, __wsum sum,
6848 + int *src_err_ptr, int *dst_err_ptr);
6849 +
6850 /*
6851 * Note: when you get a NULL pointer exception here this means someone
6852 * passed in an incorrect kernel address to one of these functions.
6853 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6854 int *err_ptr)
6855 {
6856 might_sleep();
6857 - return csum_partial_copy_generic((__force void *)src, dst,
6858 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6859 len, sum, err_ptr, NULL);
6860 }
6861
6862 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6863 {
6864 might_sleep();
6865 if (access_ok(VERIFY_WRITE, dst, len))
6866 - return csum_partial_copy_generic(src, (__force void *)dst,
6867 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6868 len, sum, NULL, err_ptr);
6869
6870 if (len)
6871 diff -urNp linux-3.0.3/arch/x86/include/asm/cpufeature.h linux-3.0.3/arch/x86/include/asm/cpufeature.h
6872 --- linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6873 +++ linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6874 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6875 ".section .discard,\"aw\",@progbits\n"
6876 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6877 ".previous\n"
6878 - ".section .altinstr_replacement,\"ax\"\n"
6879 + ".section .altinstr_replacement,\"a\"\n"
6880 "3: movb $1,%0\n"
6881 "4:\n"
6882 ".previous\n"
6883 diff -urNp linux-3.0.3/arch/x86/include/asm/desc_defs.h linux-3.0.3/arch/x86/include/asm/desc_defs.h
6884 --- linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6885 +++ linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6886 @@ -31,6 +31,12 @@ struct desc_struct {
6887 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6888 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6889 };
6890 + struct {
6891 + u16 offset_low;
6892 + u16 seg;
6893 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6894 + unsigned offset_high: 16;
6895 + } gate;
6896 };
6897 } __attribute__((packed));
6898
6899 diff -urNp linux-3.0.3/arch/x86/include/asm/desc.h linux-3.0.3/arch/x86/include/asm/desc.h
6900 --- linux-3.0.3/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6901 +++ linux-3.0.3/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6902 @@ -4,6 +4,7 @@
6903 #include <asm/desc_defs.h>
6904 #include <asm/ldt.h>
6905 #include <asm/mmu.h>
6906 +#include <asm/pgtable.h>
6907
6908 #include <linux/smp.h>
6909
6910 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6911
6912 desc->type = (info->read_exec_only ^ 1) << 1;
6913 desc->type |= info->contents << 2;
6914 + desc->type |= info->seg_not_present ^ 1;
6915
6916 desc->s = 1;
6917 desc->dpl = 0x3;
6918 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6919 }
6920
6921 extern struct desc_ptr idt_descr;
6922 -extern gate_desc idt_table[];
6923 -
6924 -struct gdt_page {
6925 - struct desc_struct gdt[GDT_ENTRIES];
6926 -} __attribute__((aligned(PAGE_SIZE)));
6927 -
6928 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6929 +extern gate_desc idt_table[256];
6930
6931 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6932 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6933 {
6934 - return per_cpu(gdt_page, cpu).gdt;
6935 + return cpu_gdt_table[cpu];
6936 }
6937
6938 #ifdef CONFIG_X86_64
6939 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6940 unsigned long base, unsigned dpl, unsigned flags,
6941 unsigned short seg)
6942 {
6943 - gate->a = (seg << 16) | (base & 0xffff);
6944 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6945 + gate->gate.offset_low = base;
6946 + gate->gate.seg = seg;
6947 + gate->gate.reserved = 0;
6948 + gate->gate.type = type;
6949 + gate->gate.s = 0;
6950 + gate->gate.dpl = dpl;
6951 + gate->gate.p = 1;
6952 + gate->gate.offset_high = base >> 16;
6953 }
6954
6955 #endif
6956 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6957
6958 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6959 {
6960 + pax_open_kernel();
6961 memcpy(&idt[entry], gate, sizeof(*gate));
6962 + pax_close_kernel();
6963 }
6964
6965 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6966 {
6967 + pax_open_kernel();
6968 memcpy(&ldt[entry], desc, 8);
6969 + pax_close_kernel();
6970 }
6971
6972 static inline void
6973 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6974 default: size = sizeof(*gdt); break;
6975 }
6976
6977 + pax_open_kernel();
6978 memcpy(&gdt[entry], desc, size);
6979 + pax_close_kernel();
6980 }
6981
6982 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6983 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6984
6985 static inline void native_load_tr_desc(void)
6986 {
6987 + pax_open_kernel();
6988 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6989 + pax_close_kernel();
6990 }
6991
6992 static inline void native_load_gdt(const struct desc_ptr *dtr)
6993 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6994 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6995 unsigned int i;
6996
6997 + pax_open_kernel();
6998 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6999 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7000 + pax_close_kernel();
7001 }
7002
7003 #define _LDT_empty(info) \
7004 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7005 desc->limit = (limit >> 16) & 0xf;
7006 }
7007
7008 -static inline void _set_gate(int gate, unsigned type, void *addr,
7009 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7010 unsigned dpl, unsigned ist, unsigned seg)
7011 {
7012 gate_desc s;
7013 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7014 * Pentium F0 0F bugfix can have resulted in the mapped
7015 * IDT being write-protected.
7016 */
7017 -static inline void set_intr_gate(unsigned int n, void *addr)
7018 +static inline void set_intr_gate(unsigned int n, const void *addr)
7019 {
7020 BUG_ON((unsigned)n > 0xFF);
7021 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7022 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7023 /*
7024 * This routine sets up an interrupt gate at directory privilege level 3.
7025 */
7026 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7027 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7028 {
7029 BUG_ON((unsigned)n > 0xFF);
7030 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7031 }
7032
7033 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7034 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7035 {
7036 BUG_ON((unsigned)n > 0xFF);
7037 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7038 }
7039
7040 -static inline void set_trap_gate(unsigned int n, void *addr)
7041 +static inline void set_trap_gate(unsigned int n, const void *addr)
7042 {
7043 BUG_ON((unsigned)n > 0xFF);
7044 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7045 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7046 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7047 {
7048 BUG_ON((unsigned)n > 0xFF);
7049 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7050 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7051 }
7052
7053 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7054 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7055 {
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7058 }
7059
7060 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7061 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7062 {
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7065 }
7066
7067 +#ifdef CONFIG_X86_32
7068 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7069 +{
7070 + struct desc_struct d;
7071 +
7072 + if (likely(limit))
7073 + limit = (limit - 1UL) >> PAGE_SHIFT;
7074 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7075 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7076 +}
7077 +#endif
7078 +
7079 #endif /* _ASM_X86_DESC_H */
7080 diff -urNp linux-3.0.3/arch/x86/include/asm/e820.h linux-3.0.3/arch/x86/include/asm/e820.h
7081 --- linux-3.0.3/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7082 +++ linux-3.0.3/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7083 @@ -69,7 +69,7 @@ struct e820map {
7084 #define ISA_START_ADDRESS 0xa0000
7085 #define ISA_END_ADDRESS 0x100000
7086
7087 -#define BIOS_BEGIN 0x000a0000
7088 +#define BIOS_BEGIN 0x000c0000
7089 #define BIOS_END 0x00100000
7090
7091 #define BIOS_ROM_BASE 0xffe00000
7092 diff -urNp linux-3.0.3/arch/x86/include/asm/elf.h linux-3.0.3/arch/x86/include/asm/elf.h
7093 --- linux-3.0.3/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7094 +++ linux-3.0.3/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7095 @@ -237,7 +237,25 @@ extern int force_personality32;
7096 the loader. We need to make sure that it is out of the way of the program
7097 that it will "exec", and that there is sufficient room for the brk. */
7098
7099 +#ifdef CONFIG_PAX_SEGMEXEC
7100 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7101 +#else
7102 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7103 +#endif
7104 +
7105 +#ifdef CONFIG_PAX_ASLR
7106 +#ifdef CONFIG_X86_32
7107 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7108 +
7109 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7111 +#else
7112 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7113 +
7114 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7116 +#endif
7117 +#endif
7118
7119 /* This yields a mask that user programs can use to figure out what
7120 instruction set this CPU supports. This could be done in user space,
7121 @@ -290,9 +308,7 @@ do { \
7122
7123 #define ARCH_DLINFO \
7124 do { \
7125 - if (vdso_enabled) \
7126 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7127 - (unsigned long)current->mm->context.vdso); \
7128 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7129 } while (0)
7130
7131 #define AT_SYSINFO 32
7132 @@ -303,7 +319,7 @@ do { \
7133
7134 #endif /* !CONFIG_X86_32 */
7135
7136 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7137 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7138
7139 #define VDSO_ENTRY \
7140 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7141 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7142 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7143 #define compat_arch_setup_additional_pages syscall32_setup_pages
7144
7145 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7146 -#define arch_randomize_brk arch_randomize_brk
7147 -
7148 #endif /* _ASM_X86_ELF_H */
7149 diff -urNp linux-3.0.3/arch/x86/include/asm/emergency-restart.h linux-3.0.3/arch/x86/include/asm/emergency-restart.h
7150 --- linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7151 +++ linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7152 @@ -15,6 +15,6 @@ enum reboot_type {
7153
7154 extern enum reboot_type reboot_type;
7155
7156 -extern void machine_emergency_restart(void);
7157 +extern void machine_emergency_restart(void) __noreturn;
7158
7159 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7160 diff -urNp linux-3.0.3/arch/x86/include/asm/futex.h linux-3.0.3/arch/x86/include/asm/futex.h
7161 --- linux-3.0.3/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7162 +++ linux-3.0.3/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7163 @@ -12,16 +12,18 @@
7164 #include <asm/system.h>
7165
7166 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7167 + typecheck(u32 *, uaddr); \
7168 asm volatile("1:\t" insn "\n" \
7169 "2:\t.section .fixup,\"ax\"\n" \
7170 "3:\tmov\t%3, %1\n" \
7171 "\tjmp\t2b\n" \
7172 "\t.previous\n" \
7173 _ASM_EXTABLE(1b, 3b) \
7174 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7175 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7176 : "i" (-EFAULT), "0" (oparg), "1" (0))
7177
7178 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7179 + typecheck(u32 *, uaddr); \
7180 asm volatile("1:\tmovl %2, %0\n" \
7181 "\tmovl\t%0, %3\n" \
7182 "\t" insn "\n" \
7183 @@ -34,7 +36,7 @@
7184 _ASM_EXTABLE(1b, 4b) \
7185 _ASM_EXTABLE(2b, 4b) \
7186 : "=&a" (oldval), "=&r" (ret), \
7187 - "+m" (*uaddr), "=&r" (tem) \
7188 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7189 : "r" (oparg), "i" (-EFAULT), "1" (0))
7190
7191 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7192 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7193
7194 switch (op) {
7195 case FUTEX_OP_SET:
7196 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7198 break;
7199 case FUTEX_OP_ADD:
7200 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7201 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7202 uaddr, oparg);
7203 break;
7204 case FUTEX_OP_OR:
7205 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7206 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7207 return -EFAULT;
7208
7209 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7210 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7211 "2:\t.section .fixup, \"ax\"\n"
7212 "3:\tmov %3, %0\n"
7213 "\tjmp 2b\n"
7214 "\t.previous\n"
7215 _ASM_EXTABLE(1b, 3b)
7216 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7217 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7218 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7219 : "memory"
7220 );
7221 diff -urNp linux-3.0.3/arch/x86/include/asm/hw_irq.h linux-3.0.3/arch/x86/include/asm/hw_irq.h
7222 --- linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7223 +++ linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7224 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7225 extern void enable_IO_APIC(void);
7226
7227 /* Statistics */
7228 -extern atomic_t irq_err_count;
7229 -extern atomic_t irq_mis_count;
7230 +extern atomic_unchecked_t irq_err_count;
7231 +extern atomic_unchecked_t irq_mis_count;
7232
7233 /* EISA */
7234 extern void eisa_set_level_irq(unsigned int irq);
7235 diff -urNp linux-3.0.3/arch/x86/include/asm/i387.h linux-3.0.3/arch/x86/include/asm/i387.h
7236 --- linux-3.0.3/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7237 +++ linux-3.0.3/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7238 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7239 {
7240 int err;
7241
7242 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7243 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7244 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7245 +#endif
7246 +
7247 /* See comment in fxsave() below. */
7248 #ifdef CONFIG_AS_FXSAVEQ
7249 asm volatile("1: fxrstorq %[fx]\n\t"
7250 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7251 {
7252 int err;
7253
7254 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7255 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7256 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7257 +#endif
7258 +
7259 /*
7260 * Clear the bytes not touched by the fxsave and reserved
7261 * for the SW usage.
7262 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7263 #endif /* CONFIG_X86_64 */
7264
7265 /* We need a safe address that is cheap to find and that is already
7266 - in L1 during context switch. The best choices are unfortunately
7267 - different for UP and SMP */
7268 -#ifdef CONFIG_SMP
7269 -#define safe_address (__per_cpu_offset[0])
7270 -#else
7271 -#define safe_address (kstat_cpu(0).cpustat.user)
7272 -#endif
7273 + in L1 during context switch. */
7274 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7275
7276 /*
7277 * These must be called with preempt disabled
7278 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7279 struct thread_info *me = current_thread_info();
7280 preempt_disable();
7281 if (me->status & TS_USEDFPU)
7282 - __save_init_fpu(me->task);
7283 + __save_init_fpu(current);
7284 else
7285 clts();
7286 }
7287 diff -urNp linux-3.0.3/arch/x86/include/asm/io.h linux-3.0.3/arch/x86/include/asm/io.h
7288 --- linux-3.0.3/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7289 +++ linux-3.0.3/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7290 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7291
7292 #include <linux/vmalloc.h>
7293
7294 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7295 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7296 +{
7297 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7298 +}
7299 +
7300 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7301 +{
7302 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7303 +}
7304 +
7305 /*
7306 * Convert a virtual cached pointer to an uncached pointer
7307 */
7308 diff -urNp linux-3.0.3/arch/x86/include/asm/irqflags.h linux-3.0.3/arch/x86/include/asm/irqflags.h
7309 --- linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7310 +++ linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7311 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7312 sti; \
7313 sysexit
7314
7315 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7316 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7317 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7318 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7319 +
7320 #else
7321 #define INTERRUPT_RETURN iret
7322 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7323 diff -urNp linux-3.0.3/arch/x86/include/asm/kprobes.h linux-3.0.3/arch/x86/include/asm/kprobes.h
7324 --- linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7325 +++ linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7326 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7327 #define RELATIVEJUMP_SIZE 5
7328 #define RELATIVECALL_OPCODE 0xe8
7329 #define RELATIVE_ADDR_SIZE 4
7330 -#define MAX_STACK_SIZE 64
7331 -#define MIN_STACK_SIZE(ADDR) \
7332 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7333 - THREAD_SIZE - (unsigned long)(ADDR))) \
7334 - ? (MAX_STACK_SIZE) \
7335 - : (((unsigned long)current_thread_info()) + \
7336 - THREAD_SIZE - (unsigned long)(ADDR)))
7337 +#define MAX_STACK_SIZE 64UL
7338 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7339
7340 #define flush_insn_slot(p) do { } while (0)
7341
7342 diff -urNp linux-3.0.3/arch/x86/include/asm/kvm_host.h linux-3.0.3/arch/x86/include/asm/kvm_host.h
7343 --- linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7344 +++ linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-08-23 21:47:55.000000000 -0400
7345 @@ -441,7 +441,7 @@ struct kvm_arch {
7346 unsigned int n_used_mmu_pages;
7347 unsigned int n_requested_mmu_pages;
7348 unsigned int n_max_mmu_pages;
7349 - atomic_t invlpg_counter;
7350 + atomic_unchecked_t invlpg_counter;
7351 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7352 /*
7353 * Hash table of struct kvm_mmu_page.
7354 @@ -618,7 +618,7 @@ struct kvm_x86_ops {
7355 struct x86_instruction_info *info,
7356 enum x86_intercept_stage stage);
7357
7358 - const struct trace_print_flags *exit_reasons_str;
7359 + const struct trace_print_flags * const exit_reasons_str;
7360 };
7361
7362 struct kvm_arch_async_pf {
7363 diff -urNp linux-3.0.3/arch/x86/include/asm/local.h linux-3.0.3/arch/x86/include/asm/local.h
7364 --- linux-3.0.3/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7365 +++ linux-3.0.3/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7366 @@ -18,26 +18,58 @@ typedef struct {
7367
7368 static inline void local_inc(local_t *l)
7369 {
7370 - asm volatile(_ASM_INC "%0"
7371 + asm volatile(_ASM_INC "%0\n"
7372 +
7373 +#ifdef CONFIG_PAX_REFCOUNT
7374 + "jno 0f\n"
7375 + _ASM_DEC "%0\n"
7376 + "int $4\n0:\n"
7377 + _ASM_EXTABLE(0b, 0b)
7378 +#endif
7379 +
7380 : "+m" (l->a.counter));
7381 }
7382
7383 static inline void local_dec(local_t *l)
7384 {
7385 - asm volatile(_ASM_DEC "%0"
7386 + asm volatile(_ASM_DEC "%0\n"
7387 +
7388 +#ifdef CONFIG_PAX_REFCOUNT
7389 + "jno 0f\n"
7390 + _ASM_INC "%0\n"
7391 + "int $4\n0:\n"
7392 + _ASM_EXTABLE(0b, 0b)
7393 +#endif
7394 +
7395 : "+m" (l->a.counter));
7396 }
7397
7398 static inline void local_add(long i, local_t *l)
7399 {
7400 - asm volatile(_ASM_ADD "%1,%0"
7401 + asm volatile(_ASM_ADD "%1,%0\n"
7402 +
7403 +#ifdef CONFIG_PAX_REFCOUNT
7404 + "jno 0f\n"
7405 + _ASM_SUB "%1,%0\n"
7406 + "int $4\n0:\n"
7407 + _ASM_EXTABLE(0b, 0b)
7408 +#endif
7409 +
7410 : "+m" (l->a.counter)
7411 : "ir" (i));
7412 }
7413
7414 static inline void local_sub(long i, local_t *l)
7415 {
7416 - asm volatile(_ASM_SUB "%1,%0"
7417 + asm volatile(_ASM_SUB "%1,%0\n"
7418 +
7419 +#ifdef CONFIG_PAX_REFCOUNT
7420 + "jno 0f\n"
7421 + _ASM_ADD "%1,%0\n"
7422 + "int $4\n0:\n"
7423 + _ASM_EXTABLE(0b, 0b)
7424 +#endif
7425 +
7426 : "+m" (l->a.counter)
7427 : "ir" (i));
7428 }
7429 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7430 {
7431 unsigned char c;
7432
7433 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7434 + asm volatile(_ASM_SUB "%2,%0\n"
7435 +
7436 +#ifdef CONFIG_PAX_REFCOUNT
7437 + "jno 0f\n"
7438 + _ASM_ADD "%2,%0\n"
7439 + "int $4\n0:\n"
7440 + _ASM_EXTABLE(0b, 0b)
7441 +#endif
7442 +
7443 + "sete %1\n"
7444 : "+m" (l->a.counter), "=qm" (c)
7445 : "ir" (i) : "memory");
7446 return c;
7447 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7448 {
7449 unsigned char c;
7450
7451 - asm volatile(_ASM_DEC "%0; sete %1"
7452 + asm volatile(_ASM_DEC "%0\n"
7453 +
7454 +#ifdef CONFIG_PAX_REFCOUNT
7455 + "jno 0f\n"
7456 + _ASM_INC "%0\n"
7457 + "int $4\n0:\n"
7458 + _ASM_EXTABLE(0b, 0b)
7459 +#endif
7460 +
7461 + "sete %1\n"
7462 : "+m" (l->a.counter), "=qm" (c)
7463 : : "memory");
7464 return c != 0;
7465 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7466 {
7467 unsigned char c;
7468
7469 - asm volatile(_ASM_INC "%0; sete %1"
7470 + asm volatile(_ASM_INC "%0\n"
7471 +
7472 +#ifdef CONFIG_PAX_REFCOUNT
7473 + "jno 0f\n"
7474 + _ASM_DEC "%0\n"
7475 + "int $4\n0:\n"
7476 + _ASM_EXTABLE(0b, 0b)
7477 +#endif
7478 +
7479 + "sete %1\n"
7480 : "+m" (l->a.counter), "=qm" (c)
7481 : : "memory");
7482 return c != 0;
7483 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7484 {
7485 unsigned char c;
7486
7487 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7488 + asm volatile(_ASM_ADD "%2,%0\n"
7489 +
7490 +#ifdef CONFIG_PAX_REFCOUNT
7491 + "jno 0f\n"
7492 + _ASM_SUB "%2,%0\n"
7493 + "int $4\n0:\n"
7494 + _ASM_EXTABLE(0b, 0b)
7495 +#endif
7496 +
7497 + "sets %1\n"
7498 : "+m" (l->a.counter), "=qm" (c)
7499 : "ir" (i) : "memory");
7500 return c;
7501 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7502 #endif
7503 /* Modern 486+ processor */
7504 __i = i;
7505 - asm volatile(_ASM_XADD "%0, %1;"
7506 + asm volatile(_ASM_XADD "%0, %1\n"
7507 +
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 + "jno 0f\n"
7510 + _ASM_MOV "%0,%1\n"
7511 + "int $4\n0:\n"
7512 + _ASM_EXTABLE(0b, 0b)
7513 +#endif
7514 +
7515 : "+r" (i), "+m" (l->a.counter)
7516 : : "memory");
7517 return i + __i;
7518 diff -urNp linux-3.0.3/arch/x86/include/asm/mman.h linux-3.0.3/arch/x86/include/asm/mman.h
7519 --- linux-3.0.3/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7520 +++ linux-3.0.3/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7521 @@ -5,4 +5,14 @@
7522
7523 #include <asm-generic/mman.h>
7524
7525 +#ifdef __KERNEL__
7526 +#ifndef __ASSEMBLY__
7527 +#ifdef CONFIG_X86_32
7528 +#define arch_mmap_check i386_mmap_check
7529 +int i386_mmap_check(unsigned long addr, unsigned long len,
7530 + unsigned long flags);
7531 +#endif
7532 +#endif
7533 +#endif
7534 +
7535 #endif /* _ASM_X86_MMAN_H */
7536 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu_context.h linux-3.0.3/arch/x86/include/asm/mmu_context.h
7537 --- linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7538 +++ linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7539 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7540
7541 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7542 {
7543 +
7544 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7545 + unsigned int i;
7546 + pgd_t *pgd;
7547 +
7548 + pax_open_kernel();
7549 + pgd = get_cpu_pgd(smp_processor_id());
7550 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7551 + set_pgd_batched(pgd+i, native_make_pgd(0));
7552 + pax_close_kernel();
7553 +#endif
7554 +
7555 #ifdef CONFIG_SMP
7556 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7557 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7558 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7559 struct task_struct *tsk)
7560 {
7561 unsigned cpu = smp_processor_id();
7562 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7563 + int tlbstate = TLBSTATE_OK;
7564 +#endif
7565
7566 if (likely(prev != next)) {
7567 #ifdef CONFIG_SMP
7568 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7569 + tlbstate = percpu_read(cpu_tlbstate.state);
7570 +#endif
7571 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7572 percpu_write(cpu_tlbstate.active_mm, next);
7573 #endif
7574 cpumask_set_cpu(cpu, mm_cpumask(next));
7575
7576 /* Re-load page tables */
7577 +#ifdef CONFIG_PAX_PER_CPU_PGD
7578 + pax_open_kernel();
7579 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7580 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7581 + pax_close_kernel();
7582 + load_cr3(get_cpu_pgd(cpu));
7583 +#else
7584 load_cr3(next->pgd);
7585 +#endif
7586
7587 /* stop flush ipis for the previous mm */
7588 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7589 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7590 */
7591 if (unlikely(prev->context.ldt != next->context.ldt))
7592 load_LDT_nolock(&next->context);
7593 - }
7594 +
7595 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7596 + if (!(__supported_pte_mask & _PAGE_NX)) {
7597 + smp_mb__before_clear_bit();
7598 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7599 + smp_mb__after_clear_bit();
7600 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7601 + }
7602 +#endif
7603 +
7604 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7605 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7606 + prev->context.user_cs_limit != next->context.user_cs_limit))
7607 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7608 #ifdef CONFIG_SMP
7609 + else if (unlikely(tlbstate != TLBSTATE_OK))
7610 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7611 +#endif
7612 +#endif
7613 +
7614 + }
7615 else {
7616 +
7617 +#ifdef CONFIG_PAX_PER_CPU_PGD
7618 + pax_open_kernel();
7619 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7620 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7621 + pax_close_kernel();
7622 + load_cr3(get_cpu_pgd(cpu));
7623 +#endif
7624 +
7625 +#ifdef CONFIG_SMP
7626 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7627 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7628
7629 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7630 * tlb flush IPI delivery. We must reload CR3
7631 * to make sure to use no freed page tables.
7632 */
7633 +
7634 +#ifndef CONFIG_PAX_PER_CPU_PGD
7635 load_cr3(next->pgd);
7636 +#endif
7637 +
7638 load_LDT_nolock(&next->context);
7639 +
7640 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7641 + if (!(__supported_pte_mask & _PAGE_NX))
7642 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7643 +#endif
7644 +
7645 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7646 +#ifdef CONFIG_PAX_PAGEEXEC
7647 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7648 +#endif
7649 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7650 +#endif
7651 +
7652 }
7653 - }
7654 #endif
7655 + }
7656 }
7657
7658 #define activate_mm(prev, next) \
7659 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu.h linux-3.0.3/arch/x86/include/asm/mmu.h
7660 --- linux-3.0.3/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7661 +++ linux-3.0.3/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7662 @@ -9,7 +9,7 @@
7663 * we put the segment information here.
7664 */
7665 typedef struct {
7666 - void *ldt;
7667 + struct desc_struct *ldt;
7668 int size;
7669
7670 #ifdef CONFIG_X86_64
7671 @@ -18,7 +18,19 @@ typedef struct {
7672 #endif
7673
7674 struct mutex lock;
7675 - void *vdso;
7676 + unsigned long vdso;
7677 +
7678 +#ifdef CONFIG_X86_32
7679 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7680 + unsigned long user_cs_base;
7681 + unsigned long user_cs_limit;
7682 +
7683 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7684 + cpumask_t cpu_user_cs_mask;
7685 +#endif
7686 +
7687 +#endif
7688 +#endif
7689 } mm_context_t;
7690
7691 #ifdef CONFIG_SMP
7692 diff -urNp linux-3.0.3/arch/x86/include/asm/module.h linux-3.0.3/arch/x86/include/asm/module.h
7693 --- linux-3.0.3/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7694 +++ linux-3.0.3/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7695 @@ -5,6 +5,7 @@
7696
7697 #ifdef CONFIG_X86_64
7698 /* X86_64 does not define MODULE_PROC_FAMILY */
7699 +#define MODULE_PROC_FAMILY ""
7700 #elif defined CONFIG_M386
7701 #define MODULE_PROC_FAMILY "386 "
7702 #elif defined CONFIG_M486
7703 @@ -59,8 +60,30 @@
7704 #error unknown processor family
7705 #endif
7706
7707 -#ifdef CONFIG_X86_32
7708 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7710 +#define MODULE_PAX_UDEREF "UDEREF "
7711 +#else
7712 +#define MODULE_PAX_UDEREF ""
7713 +#endif
7714 +
7715 +#ifdef CONFIG_PAX_KERNEXEC
7716 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7717 +#else
7718 +#define MODULE_PAX_KERNEXEC ""
7719 #endif
7720
7721 +#ifdef CONFIG_PAX_REFCOUNT
7722 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7723 +#else
7724 +#define MODULE_PAX_REFCOUNT ""
7725 +#endif
7726 +
7727 +#ifdef CONFIG_GRKERNSEC
7728 +#define MODULE_GRSEC "GRSECURITY "
7729 +#else
7730 +#define MODULE_GRSEC ""
7731 +#endif
7732 +
7733 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7734 +
7735 #endif /* _ASM_X86_MODULE_H */
7736 diff -urNp linux-3.0.3/arch/x86/include/asm/page_64_types.h linux-3.0.3/arch/x86/include/asm/page_64_types.h
7737 --- linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7738 +++ linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7739 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7740
7741 /* duplicated to the one in bootmem.h */
7742 extern unsigned long max_pfn;
7743 -extern unsigned long phys_base;
7744 +extern const unsigned long phys_base;
7745
7746 extern unsigned long __phys_addr(unsigned long);
7747 #define __phys_reloc_hide(x) (x)
7748 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt.h linux-3.0.3/arch/x86/include/asm/paravirt.h
7749 --- linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7750 +++ linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7751 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7752 val);
7753 }
7754
7755 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7756 +{
7757 + pgdval_t val = native_pgd_val(pgd);
7758 +
7759 + if (sizeof(pgdval_t) > sizeof(long))
7760 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7761 + val, (u64)val >> 32);
7762 + else
7763 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7764 + val);
7765 +}
7766 +
7767 static inline void pgd_clear(pgd_t *pgdp)
7768 {
7769 set_pgd(pgdp, __pgd(0));
7770 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7771 pv_mmu_ops.set_fixmap(idx, phys, flags);
7772 }
7773
7774 +#ifdef CONFIG_PAX_KERNEXEC
7775 +static inline unsigned long pax_open_kernel(void)
7776 +{
7777 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7778 +}
7779 +
7780 +static inline unsigned long pax_close_kernel(void)
7781 +{
7782 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7783 +}
7784 +#else
7785 +static inline unsigned long pax_open_kernel(void) { return 0; }
7786 +static inline unsigned long pax_close_kernel(void) { return 0; }
7787 +#endif
7788 +
7789 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7790
7791 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7792 @@ -955,7 +982,7 @@ extern void default_banner(void);
7793
7794 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7795 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7796 -#define PARA_INDIRECT(addr) *%cs:addr
7797 +#define PARA_INDIRECT(addr) *%ss:addr
7798 #endif
7799
7800 #define INTERRUPT_RETURN \
7801 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7802 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7803 CLBR_NONE, \
7804 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7805 +
7806 +#define GET_CR0_INTO_RDI \
7807 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7808 + mov %rax,%rdi
7809 +
7810 +#define SET_RDI_INTO_CR0 \
7811 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7812 +
7813 +#define GET_CR3_INTO_RDI \
7814 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7815 + mov %rax,%rdi
7816 +
7817 +#define SET_RDI_INTO_CR3 \
7818 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7819 +
7820 #endif /* CONFIG_X86_32 */
7821
7822 #endif /* __ASSEMBLY__ */
7823 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt_types.h linux-3.0.3/arch/x86/include/asm/paravirt_types.h
7824 --- linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7825 +++ linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7826 @@ -78,19 +78,19 @@ struct pv_init_ops {
7827 */
7828 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7829 unsigned long addr, unsigned len);
7830 -};
7831 +} __no_const;
7832
7833
7834 struct pv_lazy_ops {
7835 /* Set deferred update mode, used for batching operations. */
7836 void (*enter)(void);
7837 void (*leave)(void);
7838 -};
7839 +} __no_const;
7840
7841 struct pv_time_ops {
7842 unsigned long long (*sched_clock)(void);
7843 unsigned long (*get_tsc_khz)(void);
7844 -};
7845 +} __no_const;
7846
7847 struct pv_cpu_ops {
7848 /* hooks for various privileged instructions */
7849 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7850
7851 void (*start_context_switch)(struct task_struct *prev);
7852 void (*end_context_switch)(struct task_struct *next);
7853 -};
7854 +} __no_const;
7855
7856 struct pv_irq_ops {
7857 /*
7858 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7859 unsigned long start_eip,
7860 unsigned long start_esp);
7861 #endif
7862 -};
7863 +} __no_const;
7864
7865 struct pv_mmu_ops {
7866 unsigned long (*read_cr2)(void);
7867 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7868 struct paravirt_callee_save make_pud;
7869
7870 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7871 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7872 #endif /* PAGETABLE_LEVELS == 4 */
7873 #endif /* PAGETABLE_LEVELS >= 3 */
7874
7875 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7876 an mfn. We can tell which is which from the index. */
7877 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7878 phys_addr_t phys, pgprot_t flags);
7879 +
7880 +#ifdef CONFIG_PAX_KERNEXEC
7881 + unsigned long (*pax_open_kernel)(void);
7882 + unsigned long (*pax_close_kernel)(void);
7883 +#endif
7884 +
7885 };
7886
7887 struct arch_spinlock;
7888 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7889 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7890 int (*spin_trylock)(struct arch_spinlock *lock);
7891 void (*spin_unlock)(struct arch_spinlock *lock);
7892 -};
7893 +} __no_const;
7894
7895 /* This contains all the paravirt structures: we get a convenient
7896 * number for each function using the offset which we use to indicate
7897 diff -urNp linux-3.0.3/arch/x86/include/asm/pgalloc.h linux-3.0.3/arch/x86/include/asm/pgalloc.h
7898 --- linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7899 +++ linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7900 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7901 pmd_t *pmd, pte_t *pte)
7902 {
7903 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7904 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7905 +}
7906 +
7907 +static inline void pmd_populate_user(struct mm_struct *mm,
7908 + pmd_t *pmd, pte_t *pte)
7909 +{
7910 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7911 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7912 }
7913
7914 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-2level.h linux-3.0.3/arch/x86/include/asm/pgtable-2level.h
7915 --- linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7916 +++ linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7917 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7918
7919 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7920 {
7921 + pax_open_kernel();
7922 *pmdp = pmd;
7923 + pax_close_kernel();
7924 }
7925
7926 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7927 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32.h linux-3.0.3/arch/x86/include/asm/pgtable_32.h
7928 --- linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7929 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7930 @@ -25,9 +25,6 @@
7931 struct mm_struct;
7932 struct vm_area_struct;
7933
7934 -extern pgd_t swapper_pg_dir[1024];
7935 -extern pgd_t initial_page_table[1024];
7936 -
7937 static inline void pgtable_cache_init(void) { }
7938 static inline void check_pgt_cache(void) { }
7939 void paging_init(void);
7940 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7941 # include <asm/pgtable-2level.h>
7942 #endif
7943
7944 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7945 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7946 +#ifdef CONFIG_X86_PAE
7947 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7948 +#endif
7949 +
7950 #if defined(CONFIG_HIGHPTE)
7951 #define pte_offset_map(dir, address) \
7952 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7953 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7954 /* Clear a kernel PTE and flush it from the TLB */
7955 #define kpte_clear_flush(ptep, vaddr) \
7956 do { \
7957 + pax_open_kernel(); \
7958 pte_clear(&init_mm, (vaddr), (ptep)); \
7959 + pax_close_kernel(); \
7960 __flush_tlb_one((vaddr)); \
7961 } while (0)
7962
7963 @@ -74,6 +79,9 @@ do { \
7964
7965 #endif /* !__ASSEMBLY__ */
7966
7967 +#define HAVE_ARCH_UNMAPPED_AREA
7968 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7969 +
7970 /*
7971 * kern_addr_valid() is (1) for FLATMEM and (0) for
7972 * SPARSEMEM and DISCONTIGMEM
7973 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h
7974 --- linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7975 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7976 @@ -8,7 +8,7 @@
7977 */
7978 #ifdef CONFIG_X86_PAE
7979 # include <asm/pgtable-3level_types.h>
7980 -# define PMD_SIZE (1UL << PMD_SHIFT)
7981 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7982 # define PMD_MASK (~(PMD_SIZE - 1))
7983 #else
7984 # include <asm/pgtable-2level_types.h>
7985 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7986 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7987 #endif
7988
7989 +#ifdef CONFIG_PAX_KERNEXEC
7990 +#ifndef __ASSEMBLY__
7991 +extern unsigned char MODULES_EXEC_VADDR[];
7992 +extern unsigned char MODULES_EXEC_END[];
7993 +#endif
7994 +#include <asm/boot.h>
7995 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7996 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7997 +#else
7998 +#define ktla_ktva(addr) (addr)
7999 +#define ktva_ktla(addr) (addr)
8000 +#endif
8001 +
8002 #define MODULES_VADDR VMALLOC_START
8003 #define MODULES_END VMALLOC_END
8004 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8005 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-3level.h linux-3.0.3/arch/x86/include/asm/pgtable-3level.h
8006 --- linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8007 +++ linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8008 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8009
8010 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8011 {
8012 + pax_open_kernel();
8013 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8014 + pax_close_kernel();
8015 }
8016
8017 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8018 {
8019 + pax_open_kernel();
8020 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8021 + pax_close_kernel();
8022 }
8023
8024 /*
8025 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64.h linux-3.0.3/arch/x86/include/asm/pgtable_64.h
8026 --- linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8027 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8028 @@ -16,10 +16,13 @@
8029
8030 extern pud_t level3_kernel_pgt[512];
8031 extern pud_t level3_ident_pgt[512];
8032 +extern pud_t level3_vmalloc_pgt[512];
8033 +extern pud_t level3_vmemmap_pgt[512];
8034 +extern pud_t level2_vmemmap_pgt[512];
8035 extern pmd_t level2_kernel_pgt[512];
8036 extern pmd_t level2_fixmap_pgt[512];
8037 -extern pmd_t level2_ident_pgt[512];
8038 -extern pgd_t init_level4_pgt[];
8039 +extern pmd_t level2_ident_pgt[512*2];
8040 +extern pgd_t init_level4_pgt[512];
8041
8042 #define swapper_pg_dir init_level4_pgt
8043
8044 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8045
8046 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8047 {
8048 + pax_open_kernel();
8049 *pmdp = pmd;
8050 + pax_close_kernel();
8051 }
8052
8053 static inline void native_pmd_clear(pmd_t *pmd)
8054 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8055
8056 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8057 {
8058 + pax_open_kernel();
8059 + *pgdp = pgd;
8060 + pax_close_kernel();
8061 +}
8062 +
8063 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8064 +{
8065 *pgdp = pgd;
8066 }
8067
8068 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h
8069 --- linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8070 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8071 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8072 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8073 #define MODULES_END _AC(0xffffffffff000000, UL)
8074 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8075 +#define MODULES_EXEC_VADDR MODULES_VADDR
8076 +#define MODULES_EXEC_END MODULES_END
8077 +
8078 +#define ktla_ktva(addr) (addr)
8079 +#define ktva_ktla(addr) (addr)
8080
8081 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8082 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable.h linux-3.0.3/arch/x86/include/asm/pgtable.h
8083 --- linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8084 +++ linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8085 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8086
8087 #ifndef __PAGETABLE_PUD_FOLDED
8088 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8089 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8090 #define pgd_clear(pgd) native_pgd_clear(pgd)
8091 #endif
8092
8093 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8094
8095 #define arch_end_context_switch(prev) do {} while(0)
8096
8097 +#define pax_open_kernel() native_pax_open_kernel()
8098 +#define pax_close_kernel() native_pax_close_kernel()
8099 #endif /* CONFIG_PARAVIRT */
8100
8101 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8102 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8103 +
8104 +#ifdef CONFIG_PAX_KERNEXEC
8105 +static inline unsigned long native_pax_open_kernel(void)
8106 +{
8107 + unsigned long cr0;
8108 +
8109 + preempt_disable();
8110 + barrier();
8111 + cr0 = read_cr0() ^ X86_CR0_WP;
8112 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8113 + write_cr0(cr0);
8114 + return cr0 ^ X86_CR0_WP;
8115 +}
8116 +
8117 +static inline unsigned long native_pax_close_kernel(void)
8118 +{
8119 + unsigned long cr0;
8120 +
8121 + cr0 = read_cr0() ^ X86_CR0_WP;
8122 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8123 + write_cr0(cr0);
8124 + barrier();
8125 + preempt_enable_no_resched();
8126 + return cr0 ^ X86_CR0_WP;
8127 +}
8128 +#else
8129 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8130 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8131 +#endif
8132 +
8133 /*
8134 * The following only work if pte_present() is true.
8135 * Undefined behaviour if not..
8136 */
8137 +static inline int pte_user(pte_t pte)
8138 +{
8139 + return pte_val(pte) & _PAGE_USER;
8140 +}
8141 +
8142 static inline int pte_dirty(pte_t pte)
8143 {
8144 return pte_flags(pte) & _PAGE_DIRTY;
8145 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8146 return pte_clear_flags(pte, _PAGE_RW);
8147 }
8148
8149 +static inline pte_t pte_mkread(pte_t pte)
8150 +{
8151 + return __pte(pte_val(pte) | _PAGE_USER);
8152 +}
8153 +
8154 static inline pte_t pte_mkexec(pte_t pte)
8155 {
8156 - return pte_clear_flags(pte, _PAGE_NX);
8157 +#ifdef CONFIG_X86_PAE
8158 + if (__supported_pte_mask & _PAGE_NX)
8159 + return pte_clear_flags(pte, _PAGE_NX);
8160 + else
8161 +#endif
8162 + return pte_set_flags(pte, _PAGE_USER);
8163 +}
8164 +
8165 +static inline pte_t pte_exprotect(pte_t pte)
8166 +{
8167 +#ifdef CONFIG_X86_PAE
8168 + if (__supported_pte_mask & _PAGE_NX)
8169 + return pte_set_flags(pte, _PAGE_NX);
8170 + else
8171 +#endif
8172 + return pte_clear_flags(pte, _PAGE_USER);
8173 }
8174
8175 static inline pte_t pte_mkdirty(pte_t pte)
8176 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8177 #endif
8178
8179 #ifndef __ASSEMBLY__
8180 +
8181 +#ifdef CONFIG_PAX_PER_CPU_PGD
8182 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8183 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8184 +{
8185 + return cpu_pgd[cpu];
8186 +}
8187 +#endif
8188 +
8189 #include <linux/mm_types.h>
8190
8191 static inline int pte_none(pte_t pte)
8192 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8193
8194 static inline int pgd_bad(pgd_t pgd)
8195 {
8196 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8197 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8198 }
8199
8200 static inline int pgd_none(pgd_t pgd)
8201 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8202 * pgd_offset() returns a (pgd_t *)
8203 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8204 */
8205 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8206 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8207 +
8208 +#ifdef CONFIG_PAX_PER_CPU_PGD
8209 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8210 +#endif
8211 +
8212 /*
8213 * a shortcut which implies the use of the kernel's pgd, instead
8214 * of a process's
8215 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8216 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8217 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8218
8219 +#ifdef CONFIG_X86_32
8220 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8221 +#else
8222 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8223 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8224 +
8225 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8226 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8227 +#else
8228 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8229 +#endif
8230 +
8231 +#endif
8232 +
8233 #ifndef __ASSEMBLY__
8234
8235 extern int direct_gbpages;
8236 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8237 * dst and src can be on the same page, but the range must not overlap,
8238 * and must not cross a page boundary.
8239 */
8240 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8241 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8242 {
8243 - memcpy(dst, src, count * sizeof(pgd_t));
8244 + pax_open_kernel();
8245 + while (count--)
8246 + *dst++ = *src++;
8247 + pax_close_kernel();
8248 }
8249
8250 +#ifdef CONFIG_PAX_PER_CPU_PGD
8251 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8252 +#endif
8253 +
8254 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8255 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8256 +#else
8257 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8258 +#endif
8259
8260 #include <asm-generic/pgtable.h>
8261 #endif /* __ASSEMBLY__ */
8262 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_types.h linux-3.0.3/arch/x86/include/asm/pgtable_types.h
8263 --- linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8264 +++ linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8265 @@ -16,13 +16,12 @@
8266 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8267 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8268 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8269 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8270 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8271 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8272 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8273 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8274 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8276 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8277 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8278 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8279 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8280
8281 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8282 @@ -40,7 +39,6 @@
8283 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8284 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8285 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8286 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8287 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8288 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8289 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8290 @@ -57,8 +55,10 @@
8291
8292 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8293 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8294 -#else
8295 +#elif defined(CONFIG_KMEMCHECK)
8296 #define _PAGE_NX (_AT(pteval_t, 0))
8297 +#else
8298 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8299 #endif
8300
8301 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8302 @@ -96,6 +96,9 @@
8303 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8304 _PAGE_ACCESSED)
8305
8306 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8307 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8308 +
8309 #define __PAGE_KERNEL_EXEC \
8310 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8311 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8312 @@ -106,8 +109,8 @@
8313 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8314 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8315 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8316 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8317 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8318 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8319 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8320 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8322 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8323 @@ -166,8 +169,8 @@
8324 * bits are combined, this will alow user to access the high address mapped
8325 * VDSO in the presence of CONFIG_COMPAT_VDSO
8326 */
8327 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8328 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8329 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8331 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8332 #endif
8333
8334 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8335 {
8336 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8337 }
8338 +#endif
8339
8340 +#if PAGETABLE_LEVELS == 3
8341 +#include <asm-generic/pgtable-nopud.h>
8342 +#endif
8343 +
8344 +#if PAGETABLE_LEVELS == 2
8345 +#include <asm-generic/pgtable-nopmd.h>
8346 +#endif
8347 +
8348 +#ifndef __ASSEMBLY__
8349 #if PAGETABLE_LEVELS > 3
8350 typedef struct { pudval_t pud; } pud_t;
8351
8352 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8353 return pud.pud;
8354 }
8355 #else
8356 -#include <asm-generic/pgtable-nopud.h>
8357 -
8358 static inline pudval_t native_pud_val(pud_t pud)
8359 {
8360 return native_pgd_val(pud.pgd);
8361 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8362 return pmd.pmd;
8363 }
8364 #else
8365 -#include <asm-generic/pgtable-nopmd.h>
8366 -
8367 static inline pmdval_t native_pmd_val(pmd_t pmd)
8368 {
8369 return native_pgd_val(pmd.pud.pgd);
8370 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8371
8372 extern pteval_t __supported_pte_mask;
8373 extern void set_nx(void);
8374 -extern int nx_enabled;
8375
8376 #define pgprot_writecombine pgprot_writecombine
8377 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8378 diff -urNp linux-3.0.3/arch/x86/include/asm/processor.h linux-3.0.3/arch/x86/include/asm/processor.h
8379 --- linux-3.0.3/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8380 +++ linux-3.0.3/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8381 @@ -266,7 +266,7 @@ struct tss_struct {
8382
8383 } ____cacheline_aligned;
8384
8385 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8386 +extern struct tss_struct init_tss[NR_CPUS];
8387
8388 /*
8389 * Save the original ist values for checking stack pointers during debugging
8390 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8391 */
8392 #define TASK_SIZE PAGE_OFFSET
8393 #define TASK_SIZE_MAX TASK_SIZE
8394 +
8395 +#ifdef CONFIG_PAX_SEGMEXEC
8396 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8397 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8398 +#else
8399 #define STACK_TOP TASK_SIZE
8400 -#define STACK_TOP_MAX STACK_TOP
8401 +#endif
8402 +
8403 +#define STACK_TOP_MAX TASK_SIZE
8404
8405 #define INIT_THREAD { \
8406 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8407 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8408 .vm86_info = NULL, \
8409 .sysenter_cs = __KERNEL_CS, \
8410 .io_bitmap_ptr = NULL, \
8411 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8412 */
8413 #define INIT_TSS { \
8414 .x86_tss = { \
8415 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8416 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8417 .ss0 = __KERNEL_DS, \
8418 .ss1 = __KERNEL_CS, \
8419 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8420 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8421 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8422
8423 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8424 -#define KSTK_TOP(info) \
8425 -({ \
8426 - unsigned long *__ptr = (unsigned long *)(info); \
8427 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8428 -})
8429 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8430
8431 /*
8432 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8433 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8434 #define task_pt_regs(task) \
8435 ({ \
8436 struct pt_regs *__regs__; \
8437 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8438 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8439 __regs__ - 1; \
8440 })
8441
8442 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8443 /*
8444 * User space process size. 47bits minus one guard page.
8445 */
8446 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8447 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8448
8449 /* This decides where the kernel will search for a free chunk of vm
8450 * space during mmap's.
8451 */
8452 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8453 - 0xc0000000 : 0xFFFFe000)
8454 + 0xc0000000 : 0xFFFFf000)
8455
8456 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8457 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8458 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8459 #define STACK_TOP_MAX TASK_SIZE_MAX
8460
8461 #define INIT_THREAD { \
8462 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8463 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8464 }
8465
8466 #define INIT_TSS { \
8467 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8468 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8469 }
8470
8471 /*
8472 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8473 */
8474 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8475
8476 +#ifdef CONFIG_PAX_SEGMEXEC
8477 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8478 +#endif
8479 +
8480 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8481
8482 /* Get/set a process' ability to use the timestamp counter instruction */
8483 diff -urNp linux-3.0.3/arch/x86/include/asm/ptrace.h linux-3.0.3/arch/x86/include/asm/ptrace.h
8484 --- linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8485 +++ linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8486 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8487 }
8488
8489 /*
8490 - * user_mode_vm(regs) determines whether a register set came from user mode.
8491 + * user_mode(regs) determines whether a register set came from user mode.
8492 * This is true if V8086 mode was enabled OR if the register set was from
8493 * protected mode with RPL-3 CS value. This tricky test checks that with
8494 * one comparison. Many places in the kernel can bypass this full check
8495 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8496 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8497 + * be used.
8498 */
8499 -static inline int user_mode(struct pt_regs *regs)
8500 +static inline int user_mode_novm(struct pt_regs *regs)
8501 {
8502 #ifdef CONFIG_X86_32
8503 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8504 #else
8505 - return !!(regs->cs & 3);
8506 + return !!(regs->cs & SEGMENT_RPL_MASK);
8507 #endif
8508 }
8509
8510 -static inline int user_mode_vm(struct pt_regs *regs)
8511 +static inline int user_mode(struct pt_regs *regs)
8512 {
8513 #ifdef CONFIG_X86_32
8514 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8515 USER_RPL;
8516 #else
8517 - return user_mode(regs);
8518 + return user_mode_novm(regs);
8519 #endif
8520 }
8521
8522 diff -urNp linux-3.0.3/arch/x86/include/asm/reboot.h linux-3.0.3/arch/x86/include/asm/reboot.h
8523 --- linux-3.0.3/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8524 +++ linux-3.0.3/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8525 @@ -6,19 +6,19 @@
8526 struct pt_regs;
8527
8528 struct machine_ops {
8529 - void (*restart)(char *cmd);
8530 - void (*halt)(void);
8531 - void (*power_off)(void);
8532 + void (* __noreturn restart)(char *cmd);
8533 + void (* __noreturn halt)(void);
8534 + void (* __noreturn power_off)(void);
8535 void (*shutdown)(void);
8536 void (*crash_shutdown)(struct pt_regs *);
8537 - void (*emergency_restart)(void);
8538 -};
8539 + void (* __noreturn emergency_restart)(void);
8540 +} __no_const;
8541
8542 extern struct machine_ops machine_ops;
8543
8544 void native_machine_crash_shutdown(struct pt_regs *regs);
8545 void native_machine_shutdown(void);
8546 -void machine_real_restart(unsigned int type);
8547 +void machine_real_restart(unsigned int type) __noreturn;
8548 /* These must match dispatch_table in reboot_32.S */
8549 #define MRR_BIOS 0
8550 #define MRR_APM 1
8551 diff -urNp linux-3.0.3/arch/x86/include/asm/rwsem.h linux-3.0.3/arch/x86/include/asm/rwsem.h
8552 --- linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8553 +++ linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8554 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8555 {
8556 asm volatile("# beginning down_read\n\t"
8557 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8558 +
8559 +#ifdef CONFIG_PAX_REFCOUNT
8560 + "jno 0f\n"
8561 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8562 + "int $4\n0:\n"
8563 + _ASM_EXTABLE(0b, 0b)
8564 +#endif
8565 +
8566 /* adds 0x00000001 */
8567 " jns 1f\n"
8568 " call call_rwsem_down_read_failed\n"
8569 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8570 "1:\n\t"
8571 " mov %1,%2\n\t"
8572 " add %3,%2\n\t"
8573 +
8574 +#ifdef CONFIG_PAX_REFCOUNT
8575 + "jno 0f\n"
8576 + "sub %3,%2\n"
8577 + "int $4\n0:\n"
8578 + _ASM_EXTABLE(0b, 0b)
8579 +#endif
8580 +
8581 " jle 2f\n\t"
8582 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8583 " jnz 1b\n\t"
8584 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8585 long tmp;
8586 asm volatile("# beginning down_write\n\t"
8587 LOCK_PREFIX " xadd %1,(%2)\n\t"
8588 +
8589 +#ifdef CONFIG_PAX_REFCOUNT
8590 + "jno 0f\n"
8591 + "mov %1,(%2)\n"
8592 + "int $4\n0:\n"
8593 + _ASM_EXTABLE(0b, 0b)
8594 +#endif
8595 +
8596 /* adds 0xffff0001, returns the old value */
8597 " test %1,%1\n\t"
8598 /* was the count 0 before? */
8599 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8600 long tmp;
8601 asm volatile("# beginning __up_read\n\t"
8602 LOCK_PREFIX " xadd %1,(%2)\n\t"
8603 +
8604 +#ifdef CONFIG_PAX_REFCOUNT
8605 + "jno 0f\n"
8606 + "mov %1,(%2)\n"
8607 + "int $4\n0:\n"
8608 + _ASM_EXTABLE(0b, 0b)
8609 +#endif
8610 +
8611 /* subtracts 1, returns the old value */
8612 " jns 1f\n\t"
8613 " call call_rwsem_wake\n" /* expects old value in %edx */
8614 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8615 long tmp;
8616 asm volatile("# beginning __up_write\n\t"
8617 LOCK_PREFIX " xadd %1,(%2)\n\t"
8618 +
8619 +#ifdef CONFIG_PAX_REFCOUNT
8620 + "jno 0f\n"
8621 + "mov %1,(%2)\n"
8622 + "int $4\n0:\n"
8623 + _ASM_EXTABLE(0b, 0b)
8624 +#endif
8625 +
8626 /* subtracts 0xffff0001, returns the old value */
8627 " jns 1f\n\t"
8628 " call call_rwsem_wake\n" /* expects old value in %edx */
8629 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8630 {
8631 asm volatile("# beginning __downgrade_write\n\t"
8632 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8633 +
8634 +#ifdef CONFIG_PAX_REFCOUNT
8635 + "jno 0f\n"
8636 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8637 + "int $4\n0:\n"
8638 + _ASM_EXTABLE(0b, 0b)
8639 +#endif
8640 +
8641 /*
8642 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8643 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8644 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8645 */
8646 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8647 {
8648 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8649 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8650 +
8651 +#ifdef CONFIG_PAX_REFCOUNT
8652 + "jno 0f\n"
8653 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8654 + "int $4\n0:\n"
8655 + _ASM_EXTABLE(0b, 0b)
8656 +#endif
8657 +
8658 : "+m" (sem->count)
8659 : "er" (delta));
8660 }
8661 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8662 {
8663 long tmp = delta;
8664
8665 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8666 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8667 +
8668 +#ifdef CONFIG_PAX_REFCOUNT
8669 + "jno 0f\n"
8670 + "mov %0,%1\n"
8671 + "int $4\n0:\n"
8672 + _ASM_EXTABLE(0b, 0b)
8673 +#endif
8674 +
8675 : "+r" (tmp), "+m" (sem->count)
8676 : : "memory");
8677
8678 diff -urNp linux-3.0.3/arch/x86/include/asm/segment.h linux-3.0.3/arch/x86/include/asm/segment.h
8679 --- linux-3.0.3/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8680 +++ linux-3.0.3/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8681 @@ -64,8 +64,8 @@
8682 * 26 - ESPFIX small SS
8683 * 27 - per-cpu [ offset to per-cpu data area ]
8684 * 28 - stack_canary-20 [ for stack protector ]
8685 - * 29 - unused
8686 - * 30 - unused
8687 + * 29 - PCI BIOS CS
8688 + * 30 - PCI BIOS DS
8689 * 31 - TSS for double fault handler
8690 */
8691 #define GDT_ENTRY_TLS_MIN 6
8692 @@ -79,6 +79,8 @@
8693
8694 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8695
8696 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8697 +
8698 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8699
8700 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8701 @@ -104,6 +106,12 @@
8702 #define __KERNEL_STACK_CANARY 0
8703 #endif
8704
8705 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8706 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8707 +
8708 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8709 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8710 +
8711 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8712
8713 /*
8714 @@ -141,7 +149,7 @@
8715 */
8716
8717 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8718 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8719 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8720
8721
8722 #else
8723 @@ -165,6 +173,8 @@
8724 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8725 #define __USER32_DS __USER_DS
8726
8727 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8728 +
8729 #define GDT_ENTRY_TSS 8 /* needs two entries */
8730 #define GDT_ENTRY_LDT 10 /* needs two entries */
8731 #define GDT_ENTRY_TLS_MIN 12
8732 @@ -185,6 +195,7 @@
8733 #endif
8734
8735 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8736 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8737 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8738 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8739 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8740 diff -urNp linux-3.0.3/arch/x86/include/asm/smp.h linux-3.0.3/arch/x86/include/asm/smp.h
8741 --- linux-3.0.3/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8742 +++ linux-3.0.3/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8743 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8744 /* cpus sharing the last level cache: */
8745 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8746 DECLARE_PER_CPU(u16, cpu_llc_id);
8747 -DECLARE_PER_CPU(int, cpu_number);
8748 +DECLARE_PER_CPU(unsigned int, cpu_number);
8749
8750 static inline struct cpumask *cpu_sibling_mask(int cpu)
8751 {
8752 @@ -77,7 +77,7 @@ struct smp_ops {
8753
8754 void (*send_call_func_ipi)(const struct cpumask *mask);
8755 void (*send_call_func_single_ipi)(int cpu);
8756 -};
8757 +} __no_const;
8758
8759 /* Globals due to paravirt */
8760 extern void set_cpu_sibling_map(int cpu);
8761 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8762 extern int safe_smp_processor_id(void);
8763
8764 #elif defined(CONFIG_X86_64_SMP)
8765 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8766 -
8767 -#define stack_smp_processor_id() \
8768 -({ \
8769 - struct thread_info *ti; \
8770 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8771 - ti->cpu; \
8772 -})
8773 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8774 +#define stack_smp_processor_id() raw_smp_processor_id()
8775 #define safe_smp_processor_id() smp_processor_id()
8776
8777 #endif
8778 diff -urNp linux-3.0.3/arch/x86/include/asm/spinlock.h linux-3.0.3/arch/x86/include/asm/spinlock.h
8779 --- linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8780 +++ linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8781 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8782 static inline void arch_read_lock(arch_rwlock_t *rw)
8783 {
8784 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8785 +
8786 +#ifdef CONFIG_PAX_REFCOUNT
8787 + "jno 0f\n"
8788 + LOCK_PREFIX " addl $1,(%0)\n"
8789 + "int $4\n0:\n"
8790 + _ASM_EXTABLE(0b, 0b)
8791 +#endif
8792 +
8793 "jns 1f\n"
8794 "call __read_lock_failed\n\t"
8795 "1:\n"
8796 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8797 static inline void arch_write_lock(arch_rwlock_t *rw)
8798 {
8799 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8800 +
8801 +#ifdef CONFIG_PAX_REFCOUNT
8802 + "jno 0f\n"
8803 + LOCK_PREFIX " addl %1,(%0)\n"
8804 + "int $4\n0:\n"
8805 + _ASM_EXTABLE(0b, 0b)
8806 +#endif
8807 +
8808 "jz 1f\n"
8809 "call __write_lock_failed\n\t"
8810 "1:\n"
8811 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8812
8813 static inline void arch_read_unlock(arch_rwlock_t *rw)
8814 {
8815 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8816 + asm volatile(LOCK_PREFIX "incl %0\n"
8817 +
8818 +#ifdef CONFIG_PAX_REFCOUNT
8819 + "jno 0f\n"
8820 + LOCK_PREFIX "decl %0\n"
8821 + "int $4\n0:\n"
8822 + _ASM_EXTABLE(0b, 0b)
8823 +#endif
8824 +
8825 + :"+m" (rw->lock) : : "memory");
8826 }
8827
8828 static inline void arch_write_unlock(arch_rwlock_t *rw)
8829 {
8830 - asm volatile(LOCK_PREFIX "addl %1, %0"
8831 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8832 +
8833 +#ifdef CONFIG_PAX_REFCOUNT
8834 + "jno 0f\n"
8835 + LOCK_PREFIX "subl %1, %0\n"
8836 + "int $4\n0:\n"
8837 + _ASM_EXTABLE(0b, 0b)
8838 +#endif
8839 +
8840 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8841 }
8842
8843 diff -urNp linux-3.0.3/arch/x86/include/asm/stackprotector.h linux-3.0.3/arch/x86/include/asm/stackprotector.h
8844 --- linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8845 +++ linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8846 @@ -48,7 +48,7 @@
8847 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8848 */
8849 #define GDT_STACK_CANARY_INIT \
8850 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8851 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8852
8853 /*
8854 * Initialize the stackprotector canary value.
8855 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8856
8857 static inline void load_stack_canary_segment(void)
8858 {
8859 -#ifdef CONFIG_X86_32
8860 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8861 asm volatile ("mov %0, %%gs" : : "r" (0));
8862 #endif
8863 }
8864 diff -urNp linux-3.0.3/arch/x86/include/asm/stacktrace.h linux-3.0.3/arch/x86/include/asm/stacktrace.h
8865 --- linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8866 +++ linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8867 @@ -11,28 +11,20 @@
8868
8869 extern int kstack_depth_to_print;
8870
8871 -struct thread_info;
8872 +struct task_struct;
8873 struct stacktrace_ops;
8874
8875 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8876 - unsigned long *stack,
8877 - unsigned long bp,
8878 - const struct stacktrace_ops *ops,
8879 - void *data,
8880 - unsigned long *end,
8881 - int *graph);
8882 -
8883 -extern unsigned long
8884 -print_context_stack(struct thread_info *tinfo,
8885 - unsigned long *stack, unsigned long bp,
8886 - const struct stacktrace_ops *ops, void *data,
8887 - unsigned long *end, int *graph);
8888 -
8889 -extern unsigned long
8890 -print_context_stack_bp(struct thread_info *tinfo,
8891 - unsigned long *stack, unsigned long bp,
8892 - const struct stacktrace_ops *ops, void *data,
8893 - unsigned long *end, int *graph);
8894 +typedef unsigned long walk_stack_t(struct task_struct *task,
8895 + void *stack_start,
8896 + unsigned long *stack,
8897 + unsigned long bp,
8898 + const struct stacktrace_ops *ops,
8899 + void *data,
8900 + unsigned long *end,
8901 + int *graph);
8902 +
8903 +extern walk_stack_t print_context_stack;
8904 +extern walk_stack_t print_context_stack_bp;
8905
8906 /* Generic stack tracer with callbacks */
8907
8908 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8909 void (*address)(void *data, unsigned long address, int reliable);
8910 /* On negative return stop dumping */
8911 int (*stack)(void *data, char *name);
8912 - walk_stack_t walk_stack;
8913 + walk_stack_t *walk_stack;
8914 };
8915
8916 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8917 diff -urNp linux-3.0.3/arch/x86/include/asm/system.h linux-3.0.3/arch/x86/include/asm/system.h
8918 --- linux-3.0.3/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8919 +++ linux-3.0.3/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8920 @@ -129,7 +129,7 @@ do { \
8921 "call __switch_to\n\t" \
8922 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8923 __switch_canary \
8924 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8925 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8926 "movq %%rax,%%rdi\n\t" \
8927 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8928 "jnz ret_from_fork\n\t" \
8929 @@ -140,7 +140,7 @@ do { \
8930 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8931 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8932 [_tif_fork] "i" (_TIF_FORK), \
8933 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8934 + [thread_info] "m" (current_tinfo), \
8935 [current_task] "m" (current_task) \
8936 __switch_canary_iparam \
8937 : "memory", "cc" __EXTRA_CLOBBER)
8938 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8939 {
8940 unsigned long __limit;
8941 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8942 - return __limit + 1;
8943 + return __limit;
8944 }
8945
8946 static inline void native_clts(void)
8947 @@ -397,12 +397,12 @@ void enable_hlt(void);
8948
8949 void cpu_idle_wait(void);
8950
8951 -extern unsigned long arch_align_stack(unsigned long sp);
8952 +#define arch_align_stack(x) ((x) & ~0xfUL)
8953 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8954
8955 void default_idle(void);
8956
8957 -void stop_this_cpu(void *dummy);
8958 +void stop_this_cpu(void *dummy) __noreturn;
8959
8960 /*
8961 * Force strict CPU ordering.
8962 diff -urNp linux-3.0.3/arch/x86/include/asm/thread_info.h linux-3.0.3/arch/x86/include/asm/thread_info.h
8963 --- linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8964 +++ linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8965 @@ -10,6 +10,7 @@
8966 #include <linux/compiler.h>
8967 #include <asm/page.h>
8968 #include <asm/types.h>
8969 +#include <asm/percpu.h>
8970
8971 /*
8972 * low level task data that entry.S needs immediate access to
8973 @@ -24,7 +25,6 @@ struct exec_domain;
8974 #include <asm/atomic.h>
8975
8976 struct thread_info {
8977 - struct task_struct *task; /* main task structure */
8978 struct exec_domain *exec_domain; /* execution domain */
8979 __u32 flags; /* low level flags */
8980 __u32 status; /* thread synchronous flags */
8981 @@ -34,18 +34,12 @@ struct thread_info {
8982 mm_segment_t addr_limit;
8983 struct restart_block restart_block;
8984 void __user *sysenter_return;
8985 -#ifdef CONFIG_X86_32
8986 - unsigned long previous_esp; /* ESP of the previous stack in
8987 - case of nested (IRQ) stacks
8988 - */
8989 - __u8 supervisor_stack[0];
8990 -#endif
8991 + unsigned long lowest_stack;
8992 int uaccess_err;
8993 };
8994
8995 -#define INIT_THREAD_INFO(tsk) \
8996 +#define INIT_THREAD_INFO \
8997 { \
8998 - .task = &tsk, \
8999 .exec_domain = &default_exec_domain, \
9000 .flags = 0, \
9001 .cpu = 0, \
9002 @@ -56,7 +50,7 @@ struct thread_info {
9003 }, \
9004 }
9005
9006 -#define init_thread_info (init_thread_union.thread_info)
9007 +#define init_thread_info (init_thread_union.stack)
9008 #define init_stack (init_thread_union.stack)
9009
9010 #else /* !__ASSEMBLY__ */
9011 @@ -170,6 +164,23 @@ struct thread_info {
9012 ret; \
9013 })
9014
9015 +#ifdef __ASSEMBLY__
9016 +/* how to get the thread information struct from ASM */
9017 +#define GET_THREAD_INFO(reg) \
9018 + mov PER_CPU_VAR(current_tinfo), reg
9019 +
9020 +/* use this one if reg already contains %esp */
9021 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9022 +#else
9023 +/* how to get the thread information struct from C */
9024 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9025 +
9026 +static __always_inline struct thread_info *current_thread_info(void)
9027 +{
9028 + return percpu_read_stable(current_tinfo);
9029 +}
9030 +#endif
9031 +
9032 #ifdef CONFIG_X86_32
9033
9034 #define STACK_WARN (THREAD_SIZE/8)
9035 @@ -180,35 +191,13 @@ struct thread_info {
9036 */
9037 #ifndef __ASSEMBLY__
9038
9039 -
9040 /* how to get the current stack pointer from C */
9041 register unsigned long current_stack_pointer asm("esp") __used;
9042
9043 -/* how to get the thread information struct from C */
9044 -static inline struct thread_info *current_thread_info(void)
9045 -{
9046 - return (struct thread_info *)
9047 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9048 -}
9049 -
9050 -#else /* !__ASSEMBLY__ */
9051 -
9052 -/* how to get the thread information struct from ASM */
9053 -#define GET_THREAD_INFO(reg) \
9054 - movl $-THREAD_SIZE, reg; \
9055 - andl %esp, reg
9056 -
9057 -/* use this one if reg already contains %esp */
9058 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9059 - andl $-THREAD_SIZE, reg
9060 -
9061 #endif
9062
9063 #else /* X86_32 */
9064
9065 -#include <asm/percpu.h>
9066 -#define KERNEL_STACK_OFFSET (5*8)
9067 -
9068 /*
9069 * macros/functions for gaining access to the thread information structure
9070 * preempt_count needs to be 1 initially, until the scheduler is functional.
9071 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9072 #ifndef __ASSEMBLY__
9073 DECLARE_PER_CPU(unsigned long, kernel_stack);
9074
9075 -static inline struct thread_info *current_thread_info(void)
9076 -{
9077 - struct thread_info *ti;
9078 - ti = (void *)(percpu_read_stable(kernel_stack) +
9079 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9080 - return ti;
9081 -}
9082 -
9083 -#else /* !__ASSEMBLY__ */
9084 -
9085 -/* how to get the thread information struct from ASM */
9086 -#define GET_THREAD_INFO(reg) \
9087 - movq PER_CPU_VAR(kernel_stack),reg ; \
9088 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9089 -
9090 +/* how to get the current stack pointer from C */
9091 +register unsigned long current_stack_pointer asm("rsp") __used;
9092 #endif
9093
9094 #endif /* !X86_32 */
9095 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9096 extern void free_thread_info(struct thread_info *ti);
9097 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9098 #define arch_task_cache_init arch_task_cache_init
9099 +
9100 +#define __HAVE_THREAD_FUNCTIONS
9101 +#define task_thread_info(task) (&(task)->tinfo)
9102 +#define task_stack_page(task) ((task)->stack)
9103 +#define setup_thread_stack(p, org) do {} while (0)
9104 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9105 +
9106 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9107 +extern struct task_struct *alloc_task_struct_node(int node);
9108 +extern void free_task_struct(struct task_struct *);
9109 +
9110 #endif
9111 #endif /* _ASM_X86_THREAD_INFO_H */
9112 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_32.h linux-3.0.3/arch/x86/include/asm/uaccess_32.h
9113 --- linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9114 +++ linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9115 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9116 static __always_inline unsigned long __must_check
9117 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9118 {
9119 + pax_track_stack();
9120 +
9121 + if ((long)n < 0)
9122 + return n;
9123 +
9124 if (__builtin_constant_p(n)) {
9125 unsigned long ret;
9126
9127 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9128 return ret;
9129 }
9130 }
9131 + if (!__builtin_constant_p(n))
9132 + check_object_size(from, n, true);
9133 return __copy_to_user_ll(to, from, n);
9134 }
9135
9136 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9137 __copy_to_user(void __user *to, const void *from, unsigned long n)
9138 {
9139 might_fault();
9140 +
9141 return __copy_to_user_inatomic(to, from, n);
9142 }
9143
9144 static __always_inline unsigned long
9145 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9146 {
9147 + if ((long)n < 0)
9148 + return n;
9149 +
9150 /* Avoid zeroing the tail if the copy fails..
9151 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9152 * but as the zeroing behaviour is only significant when n is not
9153 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9154 __copy_from_user(void *to, const void __user *from, unsigned long n)
9155 {
9156 might_fault();
9157 +
9158 + pax_track_stack();
9159 +
9160 + if ((long)n < 0)
9161 + return n;
9162 +
9163 if (__builtin_constant_p(n)) {
9164 unsigned long ret;
9165
9166 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9167 return ret;
9168 }
9169 }
9170 + if (!__builtin_constant_p(n))
9171 + check_object_size(to, n, false);
9172 return __copy_from_user_ll(to, from, n);
9173 }
9174
9175 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9176 const void __user *from, unsigned long n)
9177 {
9178 might_fault();
9179 +
9180 + if ((long)n < 0)
9181 + return n;
9182 +
9183 if (__builtin_constant_p(n)) {
9184 unsigned long ret;
9185
9186 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9187 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9188 unsigned long n)
9189 {
9190 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9191 -}
9192 + if ((long)n < 0)
9193 + return n;
9194
9195 -unsigned long __must_check copy_to_user(void __user *to,
9196 - const void *from, unsigned long n);
9197 -unsigned long __must_check _copy_from_user(void *to,
9198 - const void __user *from,
9199 - unsigned long n);
9200 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9201 +}
9202
9203 +extern void copy_to_user_overflow(void)
9204 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9205 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9206 +#else
9207 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9208 +#endif
9209 +;
9210
9211 extern void copy_from_user_overflow(void)
9212 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9213 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9214 #endif
9215 ;
9216
9217 -static inline unsigned long __must_check copy_from_user(void *to,
9218 - const void __user *from,
9219 - unsigned long n)
9220 +/**
9221 + * copy_to_user: - Copy a block of data into user space.
9222 + * @to: Destination address, in user space.
9223 + * @from: Source address, in kernel space.
9224 + * @n: Number of bytes to copy.
9225 + *
9226 + * Context: User context only. This function may sleep.
9227 + *
9228 + * Copy data from kernel space to user space.
9229 + *
9230 + * Returns number of bytes that could not be copied.
9231 + * On success, this will be zero.
9232 + */
9233 +static inline unsigned long __must_check
9234 +copy_to_user(void __user *to, const void *from, unsigned long n)
9235 +{
9236 + int sz = __compiletime_object_size(from);
9237 +
9238 + if (unlikely(sz != -1 && sz < n))
9239 + copy_to_user_overflow();
9240 + else if (access_ok(VERIFY_WRITE, to, n))
9241 + n = __copy_to_user(to, from, n);
9242 + return n;
9243 +}
9244 +
9245 +/**
9246 + * copy_from_user: - Copy a block of data from user space.
9247 + * @to: Destination address, in kernel space.
9248 + * @from: Source address, in user space.
9249 + * @n: Number of bytes to copy.
9250 + *
9251 + * Context: User context only. This function may sleep.
9252 + *
9253 + * Copy data from user space to kernel space.
9254 + *
9255 + * Returns number of bytes that could not be copied.
9256 + * On success, this will be zero.
9257 + *
9258 + * If some data could not be copied, this function will pad the copied
9259 + * data to the requested size using zero bytes.
9260 + */
9261 +static inline unsigned long __must_check
9262 +copy_from_user(void *to, const void __user *from, unsigned long n)
9263 {
9264 int sz = __compiletime_object_size(to);
9265
9266 - if (likely(sz == -1 || sz >= n))
9267 - n = _copy_from_user(to, from, n);
9268 - else
9269 + if (unlikely(sz != -1 && sz < n))
9270 copy_from_user_overflow();
9271 -
9272 + else if (access_ok(VERIFY_READ, from, n))
9273 + n = __copy_from_user(to, from, n);
9274 + else if ((long)n > 0) {
9275 + if (!__builtin_constant_p(n))
9276 + check_object_size(to, n, false);
9277 + memset(to, 0, n);
9278 + }
9279 return n;
9280 }
9281
9282 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_64.h linux-3.0.3/arch/x86/include/asm/uaccess_64.h
9283 --- linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9284 +++ linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9285 @@ -10,6 +10,9 @@
9286 #include <asm/alternative.h>
9287 #include <asm/cpufeature.h>
9288 #include <asm/page.h>
9289 +#include <asm/pgtable.h>
9290 +
9291 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9292
9293 /*
9294 * Copy To/From Userspace
9295 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9296 return ret;
9297 }
9298
9299 -__must_check unsigned long
9300 -_copy_to_user(void __user *to, const void *from, unsigned len);
9301 -__must_check unsigned long
9302 -_copy_from_user(void *to, const void __user *from, unsigned len);
9303 +static __always_inline __must_check unsigned long
9304 +__copy_to_user(void __user *to, const void *from, unsigned len);
9305 +static __always_inline __must_check unsigned long
9306 +__copy_from_user(void *to, const void __user *from, unsigned len);
9307 __must_check unsigned long
9308 copy_in_user(void __user *to, const void __user *from, unsigned len);
9309
9310 static inline unsigned long __must_check copy_from_user(void *to,
9311 const void __user *from,
9312 - unsigned long n)
9313 + unsigned n)
9314 {
9315 - int sz = __compiletime_object_size(to);
9316 -
9317 might_fault();
9318 - if (likely(sz == -1 || sz >= n))
9319 - n = _copy_from_user(to, from, n);
9320 -#ifdef CONFIG_DEBUG_VM
9321 - else
9322 - WARN(1, "Buffer overflow detected!\n");
9323 -#endif
9324 +
9325 + if (access_ok(VERIFY_READ, from, n))
9326 + n = __copy_from_user(to, from, n);
9327 + else if ((int)n > 0) {
9328 + if (!__builtin_constant_p(n))
9329 + check_object_size(to, n, false);
9330 + memset(to, 0, n);
9331 + }
9332 return n;
9333 }
9334
9335 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9336 {
9337 might_fault();
9338
9339 - return _copy_to_user(dst, src, size);
9340 + if (access_ok(VERIFY_WRITE, dst, size))
9341 + size = __copy_to_user(dst, src, size);
9342 + return size;
9343 }
9344
9345 static __always_inline __must_check
9346 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9348 {
9349 - int ret = 0;
9350 + int sz = __compiletime_object_size(dst);
9351 + unsigned ret = 0;
9352
9353 might_fault();
9354 - if (!__builtin_constant_p(size))
9355 - return copy_user_generic(dst, (__force void *)src, size);
9356 +
9357 + pax_track_stack();
9358 +
9359 + if ((int)size < 0)
9360 + return size;
9361 +
9362 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9363 + if (!__access_ok(VERIFY_READ, src, size))
9364 + return size;
9365 +#endif
9366 +
9367 + if (unlikely(sz != -1 && sz < size)) {
9368 +#ifdef CONFIG_DEBUG_VM
9369 + WARN(1, "Buffer overflow detected!\n");
9370 +#endif
9371 + return size;
9372 + }
9373 +
9374 + if (!__builtin_constant_p(size)) {
9375 + check_object_size(dst, size, false);
9376 +
9377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9378 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9379 + src += PAX_USER_SHADOW_BASE;
9380 +#endif
9381 +
9382 + return copy_user_generic(dst, (__force const void *)src, size);
9383 + }
9384 switch (size) {
9385 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9386 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9387 ret, "b", "b", "=q", 1);
9388 return ret;
9389 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9390 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9391 ret, "w", "w", "=r", 2);
9392 return ret;
9393 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9394 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9395 ret, "l", "k", "=r", 4);
9396 return ret;
9397 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9398 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9399 ret, "q", "", "=r", 8);
9400 return ret;
9401 case 10:
9402 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9403 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9404 ret, "q", "", "=r", 10);
9405 if (unlikely(ret))
9406 return ret;
9407 __get_user_asm(*(u16 *)(8 + (char *)dst),
9408 - (u16 __user *)(8 + (char __user *)src),
9409 + (const u16 __user *)(8 + (const char __user *)src),
9410 ret, "w", "w", "=r", 2);
9411 return ret;
9412 case 16:
9413 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9414 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9415 ret, "q", "", "=r", 16);
9416 if (unlikely(ret))
9417 return ret;
9418 __get_user_asm(*(u64 *)(8 + (char *)dst),
9419 - (u64 __user *)(8 + (char __user *)src),
9420 + (const u64 __user *)(8 + (const char __user *)src),
9421 ret, "q", "", "=r", 8);
9422 return ret;
9423 default:
9424 - return copy_user_generic(dst, (__force void *)src, size);
9425 +
9426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9427 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9428 + src += PAX_USER_SHADOW_BASE;
9429 +#endif
9430 +
9431 + return copy_user_generic(dst, (__force const void *)src, size);
9432 }
9433 }
9434
9435 static __always_inline __must_check
9436 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9438 {
9439 - int ret = 0;
9440 + int sz = __compiletime_object_size(src);
9441 + unsigned ret = 0;
9442
9443 might_fault();
9444 - if (!__builtin_constant_p(size))
9445 +
9446 + pax_track_stack();
9447 +
9448 + if ((int)size < 0)
9449 + return size;
9450 +
9451 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9452 + if (!__access_ok(VERIFY_WRITE, dst, size))
9453 + return size;
9454 +#endif
9455 +
9456 + if (unlikely(sz != -1 && sz < size)) {
9457 +#ifdef CONFIG_DEBUG_VM
9458 + WARN(1, "Buffer overflow detected!\n");
9459 +#endif
9460 + return size;
9461 + }
9462 +
9463 + if (!__builtin_constant_p(size)) {
9464 + check_object_size(src, size, true);
9465 +
9466 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9467 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9468 + dst += PAX_USER_SHADOW_BASE;
9469 +#endif
9470 +
9471 return copy_user_generic((__force void *)dst, src, size);
9472 + }
9473 switch (size) {
9474 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9475 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9476 ret, "b", "b", "iq", 1);
9477 return ret;
9478 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9479 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9480 ret, "w", "w", "ir", 2);
9481 return ret;
9482 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9483 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9484 ret, "l", "k", "ir", 4);
9485 return ret;
9486 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9487 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9489 return ret;
9490 case 10:
9491 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9492 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9493 ret, "q", "", "er", 10);
9494 if (unlikely(ret))
9495 return ret;
9496 asm("":::"memory");
9497 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9498 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9499 ret, "w", "w", "ir", 2);
9500 return ret;
9501 case 16:
9502 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9503 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9504 ret, "q", "", "er", 16);
9505 if (unlikely(ret))
9506 return ret;
9507 asm("":::"memory");
9508 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9509 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9510 ret, "q", "", "er", 8);
9511 return ret;
9512 default:
9513 +
9514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9515 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9516 + dst += PAX_USER_SHADOW_BASE;
9517 +#endif
9518 +
9519 return copy_user_generic((__force void *)dst, src, size);
9520 }
9521 }
9522
9523 static __always_inline __must_check
9524 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9526 {
9527 - int ret = 0;
9528 + unsigned ret = 0;
9529
9530 might_fault();
9531 - if (!__builtin_constant_p(size))
9532 +
9533 + if ((int)size < 0)
9534 + return size;
9535 +
9536 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9537 + if (!__access_ok(VERIFY_READ, src, size))
9538 + return size;
9539 + if (!__access_ok(VERIFY_WRITE, dst, size))
9540 + return size;
9541 +#endif
9542 +
9543 + if (!__builtin_constant_p(size)) {
9544 +
9545 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9546 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9547 + src += PAX_USER_SHADOW_BASE;
9548 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9549 + dst += PAX_USER_SHADOW_BASE;
9550 +#endif
9551 +
9552 return copy_user_generic((__force void *)dst,
9553 - (__force void *)src, size);
9554 + (__force const void *)src, size);
9555 + }
9556 switch (size) {
9557 case 1: {
9558 u8 tmp;
9559 - __get_user_asm(tmp, (u8 __user *)src,
9560 + __get_user_asm(tmp, (const u8 __user *)src,
9561 ret, "b", "b", "=q", 1);
9562 if (likely(!ret))
9563 __put_user_asm(tmp, (u8 __user *)dst,
9564 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9565 }
9566 case 2: {
9567 u16 tmp;
9568 - __get_user_asm(tmp, (u16 __user *)src,
9569 + __get_user_asm(tmp, (const u16 __user *)src,
9570 ret, "w", "w", "=r", 2);
9571 if (likely(!ret))
9572 __put_user_asm(tmp, (u16 __user *)dst,
9573 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9574
9575 case 4: {
9576 u32 tmp;
9577 - __get_user_asm(tmp, (u32 __user *)src,
9578 + __get_user_asm(tmp, (const u32 __user *)src,
9579 ret, "l", "k", "=r", 4);
9580 if (likely(!ret))
9581 __put_user_asm(tmp, (u32 __user *)dst,
9582 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9583 }
9584 case 8: {
9585 u64 tmp;
9586 - __get_user_asm(tmp, (u64 __user *)src,
9587 + __get_user_asm(tmp, (const u64 __user *)src,
9588 ret, "q", "", "=r", 8);
9589 if (likely(!ret))
9590 __put_user_asm(tmp, (u64 __user *)dst,
9591 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9592 return ret;
9593 }
9594 default:
9595 +
9596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9597 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9598 + src += PAX_USER_SHADOW_BASE;
9599 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9600 + dst += PAX_USER_SHADOW_BASE;
9601 +#endif
9602 +
9603 return copy_user_generic((__force void *)dst,
9604 - (__force void *)src, size);
9605 + (__force const void *)src, size);
9606 }
9607 }
9608
9609 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9610 static __must_check __always_inline int
9611 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9612 {
9613 + pax_track_stack();
9614 +
9615 + if ((int)size < 0)
9616 + return size;
9617 +
9618 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9619 + if (!__access_ok(VERIFY_READ, src, size))
9620 + return size;
9621 +
9622 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9623 + src += PAX_USER_SHADOW_BASE;
9624 +#endif
9625 +
9626 return copy_user_generic(dst, (__force const void *)src, size);
9627 }
9628
9629 -static __must_check __always_inline int
9630 +static __must_check __always_inline unsigned long
9631 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9632 {
9633 + if ((int)size < 0)
9634 + return size;
9635 +
9636 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9637 + if (!__access_ok(VERIFY_WRITE, dst, size))
9638 + return size;
9639 +
9640 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9641 + dst += PAX_USER_SHADOW_BASE;
9642 +#endif
9643 +
9644 return copy_user_generic((__force void *)dst, src, size);
9645 }
9646
9647 -extern long __copy_user_nocache(void *dst, const void __user *src,
9648 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9649 unsigned size, int zerorest);
9650
9651 -static inline int
9652 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9654 {
9655 might_sleep();
9656 +
9657 + if ((int)size < 0)
9658 + return size;
9659 +
9660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9661 + if (!__access_ok(VERIFY_READ, src, size))
9662 + return size;
9663 +#endif
9664 +
9665 return __copy_user_nocache(dst, src, size, 1);
9666 }
9667
9668 -static inline int
9669 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9671 unsigned size)
9672 {
9673 + if ((int)size < 0)
9674 + return size;
9675 +
9676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9677 + if (!__access_ok(VERIFY_READ, src, size))
9678 + return size;
9679 +#endif
9680 +
9681 return __copy_user_nocache(dst, src, size, 0);
9682 }
9683
9684 -unsigned long
9685 +extern unsigned long
9686 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9687
9688 #endif /* _ASM_X86_UACCESS_64_H */
9689 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess.h linux-3.0.3/arch/x86/include/asm/uaccess.h
9690 --- linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9691 +++ linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9692 @@ -7,12 +7,15 @@
9693 #include <linux/compiler.h>
9694 #include <linux/thread_info.h>
9695 #include <linux/string.h>
9696 +#include <linux/sched.h>
9697 #include <asm/asm.h>
9698 #include <asm/page.h>
9699
9700 #define VERIFY_READ 0
9701 #define VERIFY_WRITE 1
9702
9703 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9704 +
9705 /*
9706 * The fs value determines whether argument validity checking should be
9707 * performed or not. If get_fs() == USER_DS, checking is performed, with
9708 @@ -28,7 +31,12 @@
9709
9710 #define get_ds() (KERNEL_DS)
9711 #define get_fs() (current_thread_info()->addr_limit)
9712 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9713 +void __set_fs(mm_segment_t x);
9714 +void set_fs(mm_segment_t x);
9715 +#else
9716 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9717 +#endif
9718
9719 #define segment_eq(a, b) ((a).seg == (b).seg)
9720
9721 @@ -76,7 +84,33 @@
9722 * checks that the pointer is in the user space range - after calling
9723 * this function, memory access functions may still return -EFAULT.
9724 */
9725 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9727 +#define access_ok(type, addr, size) \
9728 +({ \
9729 + long __size = size; \
9730 + unsigned long __addr = (unsigned long)addr; \
9731 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9732 + unsigned long __end_ao = __addr + __size - 1; \
9733 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9734 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9735 + while(__addr_ao <= __end_ao) { \
9736 + char __c_ao; \
9737 + __addr_ao += PAGE_SIZE; \
9738 + if (__size > PAGE_SIZE) \
9739 + cond_resched(); \
9740 + if (__get_user(__c_ao, (char __user *)__addr)) \
9741 + break; \
9742 + if (type != VERIFY_WRITE) { \
9743 + __addr = __addr_ao; \
9744 + continue; \
9745 + } \
9746 + if (__put_user(__c_ao, (char __user *)__addr)) \
9747 + break; \
9748 + __addr = __addr_ao; \
9749 + } \
9750 + } \
9751 + __ret_ao; \
9752 +})
9753
9754 /*
9755 * The exception table consists of pairs of addresses: the first is the
9756 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9757 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9758 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9759
9760 -
9761 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9762 +#define __copyuser_seg "gs;"
9763 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9764 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9765 +#else
9766 +#define __copyuser_seg
9767 +#define __COPYUSER_SET_ES
9768 +#define __COPYUSER_RESTORE_ES
9769 +#endif
9770
9771 #ifdef CONFIG_X86_32
9772 #define __put_user_asm_u64(x, addr, err, errret) \
9773 - asm volatile("1: movl %%eax,0(%2)\n" \
9774 - "2: movl %%edx,4(%2)\n" \
9775 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9776 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9777 "3:\n" \
9778 ".section .fixup,\"ax\"\n" \
9779 "4: movl %3,%0\n" \
9780 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9781 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9782
9783 #define __put_user_asm_ex_u64(x, addr) \
9784 - asm volatile("1: movl %%eax,0(%1)\n" \
9785 - "2: movl %%edx,4(%1)\n" \
9786 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9787 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9788 "3:\n" \
9789 _ASM_EXTABLE(1b, 2b - 1b) \
9790 _ASM_EXTABLE(2b, 3b - 2b) \
9791 @@ -373,7 +415,7 @@ do { \
9792 } while (0)
9793
9794 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9795 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9796 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9797 "2:\n" \
9798 ".section .fixup,\"ax\"\n" \
9799 "3: mov %3,%0\n" \
9800 @@ -381,7 +423,7 @@ do { \
9801 " jmp 2b\n" \
9802 ".previous\n" \
9803 _ASM_EXTABLE(1b, 3b) \
9804 - : "=r" (err), ltype(x) \
9805 + : "=r" (err), ltype (x) \
9806 : "m" (__m(addr)), "i" (errret), "0" (err))
9807
9808 #define __get_user_size_ex(x, ptr, size) \
9809 @@ -406,7 +448,7 @@ do { \
9810 } while (0)
9811
9812 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9813 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9814 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9815 "2:\n" \
9816 _ASM_EXTABLE(1b, 2b - 1b) \
9817 : ltype(x) : "m" (__m(addr)))
9818 @@ -423,13 +465,24 @@ do { \
9819 int __gu_err; \
9820 unsigned long __gu_val; \
9821 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9822 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9823 + (x) = (__typeof__(*(ptr)))__gu_val; \
9824 __gu_err; \
9825 })
9826
9827 /* FIXME: this hack is definitely wrong -AK */
9828 struct __large_struct { unsigned long buf[100]; };
9829 -#define __m(x) (*(struct __large_struct __user *)(x))
9830 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9831 +#define ____m(x) \
9832 +({ \
9833 + unsigned long ____x = (unsigned long)(x); \
9834 + if (____x < PAX_USER_SHADOW_BASE) \
9835 + ____x += PAX_USER_SHADOW_BASE; \
9836 + (void __user *)____x; \
9837 +})
9838 +#else
9839 +#define ____m(x) (x)
9840 +#endif
9841 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9842
9843 /*
9844 * Tell gcc we read from memory instead of writing: this is because
9845 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9846 * aliasing issues.
9847 */
9848 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9849 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9850 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9851 "2:\n" \
9852 ".section .fixup,\"ax\"\n" \
9853 "3: mov %3,%0\n" \
9854 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9855 ".previous\n" \
9856 _ASM_EXTABLE(1b, 3b) \
9857 : "=r"(err) \
9858 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9859 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9860
9861 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9862 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9863 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9864 "2:\n" \
9865 _ASM_EXTABLE(1b, 2b - 1b) \
9866 : : ltype(x), "m" (__m(addr)))
9867 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9868 * On error, the variable @x is set to zero.
9869 */
9870
9871 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9872 +#define __get_user(x, ptr) get_user((x), (ptr))
9873 +#else
9874 #define __get_user(x, ptr) \
9875 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9876 +#endif
9877
9878 /**
9879 * __put_user: - Write a simple value into user space, with less checking.
9880 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9881 * Returns zero on success, or -EFAULT on error.
9882 */
9883
9884 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9885 +#define __put_user(x, ptr) put_user((x), (ptr))
9886 +#else
9887 #define __put_user(x, ptr) \
9888 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9889 +#endif
9890
9891 #define __get_user_unaligned __get_user
9892 #define __put_user_unaligned __put_user
9893 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9894 #define get_user_ex(x, ptr) do { \
9895 unsigned long __gue_val; \
9896 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9897 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9898 + (x) = (__typeof__(*(ptr)))__gue_val; \
9899 } while (0)
9900
9901 #ifdef CONFIG_X86_WP_WORKS_OK
9902 diff -urNp linux-3.0.3/arch/x86/include/asm/vgtod.h linux-3.0.3/arch/x86/include/asm/vgtod.h
9903 --- linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9904 +++ linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9905 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9906 int sysctl_enabled;
9907 struct timezone sys_tz;
9908 struct { /* extract of a clocksource struct */
9909 + char name[8];
9910 cycle_t (*vread)(void);
9911 cycle_t cycle_last;
9912 cycle_t mask;
9913 diff -urNp linux-3.0.3/arch/x86/include/asm/x86_init.h linux-3.0.3/arch/x86/include/asm/x86_init.h
9914 --- linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9915 +++ linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9916 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9917 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9918 void (*find_smp_config)(void);
9919 void (*get_smp_config)(unsigned int early);
9920 -};
9921 +} __no_const;
9922
9923 /**
9924 * struct x86_init_resources - platform specific resource related ops
9925 @@ -42,7 +42,7 @@ struct x86_init_resources {
9926 void (*probe_roms)(void);
9927 void (*reserve_resources)(void);
9928 char *(*memory_setup)(void);
9929 -};
9930 +} __no_const;
9931
9932 /**
9933 * struct x86_init_irqs - platform specific interrupt setup
9934 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9935 void (*pre_vector_init)(void);
9936 void (*intr_init)(void);
9937 void (*trap_init)(void);
9938 -};
9939 +} __no_const;
9940
9941 /**
9942 * struct x86_init_oem - oem platform specific customizing functions
9943 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9944 struct x86_init_oem {
9945 void (*arch_setup)(void);
9946 void (*banner)(void);
9947 -};
9948 +} __no_const;
9949
9950 /**
9951 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9952 @@ -76,7 +76,7 @@ struct x86_init_oem {
9953 */
9954 struct x86_init_mapping {
9955 void (*pagetable_reserve)(u64 start, u64 end);
9956 -};
9957 +} __no_const;
9958
9959 /**
9960 * struct x86_init_paging - platform specific paging functions
9961 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9962 struct x86_init_paging {
9963 void (*pagetable_setup_start)(pgd_t *base);
9964 void (*pagetable_setup_done)(pgd_t *base);
9965 -};
9966 +} __no_const;
9967
9968 /**
9969 * struct x86_init_timers - platform specific timer setup
9970 @@ -101,7 +101,7 @@ struct x86_init_timers {
9971 void (*tsc_pre_init)(void);
9972 void (*timer_init)(void);
9973 void (*wallclock_init)(void);
9974 -};
9975 +} __no_const;
9976
9977 /**
9978 * struct x86_init_iommu - platform specific iommu setup
9979 @@ -109,7 +109,7 @@ struct x86_init_timers {
9980 */
9981 struct x86_init_iommu {
9982 int (*iommu_init)(void);
9983 -};
9984 +} __no_const;
9985
9986 /**
9987 * struct x86_init_pci - platform specific pci init functions
9988 @@ -123,7 +123,7 @@ struct x86_init_pci {
9989 int (*init)(void);
9990 void (*init_irq)(void);
9991 void (*fixup_irqs)(void);
9992 -};
9993 +} __no_const;
9994
9995 /**
9996 * struct x86_init_ops - functions for platform specific setup
9997 @@ -139,7 +139,7 @@ struct x86_init_ops {
9998 struct x86_init_timers timers;
9999 struct x86_init_iommu iommu;
10000 struct x86_init_pci pci;
10001 -};
10002 +} __no_const;
10003
10004 /**
10005 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10006 @@ -147,7 +147,7 @@ struct x86_init_ops {
10007 */
10008 struct x86_cpuinit_ops {
10009 void (*setup_percpu_clockev)(void);
10010 -};
10011 +} __no_const;
10012
10013 /**
10014 * struct x86_platform_ops - platform specific runtime functions
10015 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10016 bool (*is_untracked_pat_range)(u64 start, u64 end);
10017 void (*nmi_init)(void);
10018 int (*i8042_detect)(void);
10019 -};
10020 +} __no_const;
10021
10022 struct pci_dev;
10023
10024 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10025 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10026 void (*teardown_msi_irq)(unsigned int irq);
10027 void (*teardown_msi_irqs)(struct pci_dev *dev);
10028 -};
10029 +} __no_const;
10030
10031 extern struct x86_init_ops x86_init;
10032 extern struct x86_cpuinit_ops x86_cpuinit;
10033 diff -urNp linux-3.0.3/arch/x86/include/asm/xsave.h linux-3.0.3/arch/x86/include/asm/xsave.h
10034 --- linux-3.0.3/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10035 +++ linux-3.0.3/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10036 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10037 {
10038 int err;
10039
10040 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10041 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10042 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10043 +#endif
10044 +
10045 /*
10046 * Clear the xsave header first, so that reserved fields are
10047 * initialized to zero.
10048 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10049 u32 lmask = mask;
10050 u32 hmask = mask >> 32;
10051
10052 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10053 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10054 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10055 +#endif
10056 +
10057 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10058 "2:\n"
10059 ".section .fixup,\"ax\"\n"
10060 diff -urNp linux-3.0.3/arch/x86/Kconfig linux-3.0.3/arch/x86/Kconfig
10061 --- linux-3.0.3/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10062 +++ linux-3.0.3/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10063 @@ -229,7 +229,7 @@ config X86_HT
10064
10065 config X86_32_LAZY_GS
10066 def_bool y
10067 - depends on X86_32 && !CC_STACKPROTECTOR
10068 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10069
10070 config ARCH_HWEIGHT_CFLAGS
10071 string
10072 @@ -1018,7 +1018,7 @@ choice
10073
10074 config NOHIGHMEM
10075 bool "off"
10076 - depends on !X86_NUMAQ
10077 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10078 ---help---
10079 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10080 However, the address space of 32-bit x86 processors is only 4
10081 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10082
10083 config HIGHMEM4G
10084 bool "4GB"
10085 - depends on !X86_NUMAQ
10086 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10087 ---help---
10088 Select this if you have a 32-bit processor and between 1 and 4
10089 gigabytes of physical RAM.
10090 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10091 hex
10092 default 0xB0000000 if VMSPLIT_3G_OPT
10093 default 0x80000000 if VMSPLIT_2G
10094 - default 0x78000000 if VMSPLIT_2G_OPT
10095 + default 0x70000000 if VMSPLIT_2G_OPT
10096 default 0x40000000 if VMSPLIT_1G
10097 default 0xC0000000
10098 depends on X86_32
10099 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10100
10101 config EFI
10102 bool "EFI runtime service support"
10103 - depends on ACPI
10104 + depends on ACPI && !PAX_KERNEXEC
10105 ---help---
10106 This enables the kernel to use EFI runtime services that are
10107 available (such as the EFI variable services).
10108 @@ -1483,6 +1483,7 @@ config SECCOMP
10109
10110 config CC_STACKPROTECTOR
10111 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10112 + depends on X86_64 || !PAX_MEMORY_UDEREF
10113 ---help---
10114 This option turns on the -fstack-protector GCC feature. This
10115 feature puts, at the beginning of functions, a canary value on
10116 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10117 config PHYSICAL_START
10118 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10119 default "0x1000000"
10120 + range 0x400000 0x40000000
10121 ---help---
10122 This gives the physical address where the kernel is loaded.
10123
10124 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10125 config PHYSICAL_ALIGN
10126 hex "Alignment value to which kernel should be aligned" if X86_32
10127 default "0x1000000"
10128 + range 0x400000 0x1000000 if PAX_KERNEXEC
10129 range 0x2000 0x1000000
10130 ---help---
10131 This value puts the alignment restrictions on physical address
10132 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10133 Say N if you want to disable CPU hotplug.
10134
10135 config COMPAT_VDSO
10136 - def_bool y
10137 + def_bool n
10138 prompt "Compat VDSO support"
10139 depends on X86_32 || IA32_EMULATION
10140 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10141 ---help---
10142 Map the 32-bit VDSO to the predictable old-style address too.
10143
10144 diff -urNp linux-3.0.3/arch/x86/Kconfig.cpu linux-3.0.3/arch/x86/Kconfig.cpu
10145 --- linux-3.0.3/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10146 +++ linux-3.0.3/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10147 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10148
10149 config X86_F00F_BUG
10150 def_bool y
10151 - depends on M586MMX || M586TSC || M586 || M486 || M386
10152 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10153
10154 config X86_INVD_BUG
10155 def_bool y
10156 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10157
10158 config X86_ALIGNMENT_16
10159 def_bool y
10160 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10162
10163 config X86_INTEL_USERCOPY
10164 def_bool y
10165 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10166 # generates cmov.
10167 config X86_CMOV
10168 def_bool y
10169 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10171
10172 config X86_MINIMUM_CPU_FAMILY
10173 int
10174 diff -urNp linux-3.0.3/arch/x86/Kconfig.debug linux-3.0.3/arch/x86/Kconfig.debug
10175 --- linux-3.0.3/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10176 +++ linux-3.0.3/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10177 @@ -81,7 +81,7 @@ config X86_PTDUMP
10178 config DEBUG_RODATA
10179 bool "Write protect kernel read-only data structures"
10180 default y
10181 - depends on DEBUG_KERNEL
10182 + depends on DEBUG_KERNEL && BROKEN
10183 ---help---
10184 Mark the kernel read-only data as write-protected in the pagetables,
10185 in order to catch accidental (and incorrect) writes to such const
10186 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10187
10188 config DEBUG_SET_MODULE_RONX
10189 bool "Set loadable kernel module data as NX and text as RO"
10190 - depends on MODULES
10191 + depends on MODULES && BROKEN
10192 ---help---
10193 This option helps catch unintended modifications to loadable
10194 kernel module's text and read-only data. It also prevents execution
10195 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile
10196 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10197 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10198 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10199 $(call cc-option, -fno-stack-protector) \
10200 $(call cc-option, -mpreferred-stack-boundary=2)
10201 KBUILD_CFLAGS += $(call cc-option, -m32)
10202 +ifdef CONSTIFY_PLUGIN
10203 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10204 +endif
10205 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10206 GCOV_PROFILE := n
10207
10208 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S
10209 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10210 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10211 @@ -108,6 +108,9 @@ wakeup_code:
10212 /* Do any other stuff... */
10213
10214 #ifndef CONFIG_64BIT
10215 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10216 + call verify_cpu
10217 +
10218 /* This could also be done in C code... */
10219 movl pmode_cr3, %eax
10220 movl %eax, %cr3
10221 @@ -131,6 +134,7 @@ wakeup_code:
10222 movl pmode_cr0, %eax
10223 movl %eax, %cr0
10224 jmp pmode_return
10225 +# include "../../verify_cpu.S"
10226 #else
10227 pushw $0
10228 pushw trampoline_segment
10229 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/sleep.c linux-3.0.3/arch/x86/kernel/acpi/sleep.c
10230 --- linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10231 +++ linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10232 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10233 header->trampoline_segment = trampoline_address() >> 4;
10234 #ifdef CONFIG_SMP
10235 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10236 +
10237 + pax_open_kernel();
10238 early_gdt_descr.address =
10239 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10240 + pax_close_kernel();
10241 +
10242 initial_gs = per_cpu_offset(smp_processor_id());
10243 #endif
10244 initial_code = (unsigned long)wakeup_long64;
10245 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S
10246 --- linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10247 +++ linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10248 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10249 # and restore the stack ... but you need gdt for this to work
10250 movl saved_context_esp, %esp
10251
10252 - movl %cs:saved_magic, %eax
10253 - cmpl $0x12345678, %eax
10254 + cmpl $0x12345678, saved_magic
10255 jne bogus_magic
10256
10257 # jump to place where we left off
10258 - movl saved_eip, %eax
10259 - jmp *%eax
10260 + jmp *(saved_eip)
10261
10262 bogus_magic:
10263 jmp bogus_magic
10264 diff -urNp linux-3.0.3/arch/x86/kernel/alternative.c linux-3.0.3/arch/x86/kernel/alternative.c
10265 --- linux-3.0.3/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10266 +++ linux-3.0.3/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10267 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10268 if (!*poff || ptr < text || ptr >= text_end)
10269 continue;
10270 /* turn DS segment override prefix into lock prefix */
10271 - if (*ptr == 0x3e)
10272 + if (*ktla_ktva(ptr) == 0x3e)
10273 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10274 };
10275 mutex_unlock(&text_mutex);
10276 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10277 if (!*poff || ptr < text || ptr >= text_end)
10278 continue;
10279 /* turn lock prefix into DS segment override prefix */
10280 - if (*ptr == 0xf0)
10281 + if (*ktla_ktva(ptr) == 0xf0)
10282 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10283 };
10284 mutex_unlock(&text_mutex);
10285 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10286
10287 BUG_ON(p->len > MAX_PATCH_LEN);
10288 /* prep the buffer with the original instructions */
10289 - memcpy(insnbuf, p->instr, p->len);
10290 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10291 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10292 (unsigned long)p->instr, p->len);
10293
10294 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10295 if (smp_alt_once)
10296 free_init_pages("SMP alternatives",
10297 (unsigned long)__smp_locks,
10298 - (unsigned long)__smp_locks_end);
10299 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10300
10301 restart_nmi();
10302 }
10303 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10304 * instructions. And on the local CPU you need to be protected again NMI or MCE
10305 * handlers seeing an inconsistent instruction while you patch.
10306 */
10307 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10308 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10309 size_t len)
10310 {
10311 unsigned long flags;
10312 local_irq_save(flags);
10313 - memcpy(addr, opcode, len);
10314 +
10315 + pax_open_kernel();
10316 + memcpy(ktla_ktva(addr), opcode, len);
10317 sync_core();
10318 + pax_close_kernel();
10319 +
10320 local_irq_restore(flags);
10321 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10322 that causes hangs on some VIA CPUs. */
10323 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10324 */
10325 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10326 {
10327 - unsigned long flags;
10328 - char *vaddr;
10329 + unsigned char *vaddr = ktla_ktva(addr);
10330 struct page *pages[2];
10331 - int i;
10332 + size_t i;
10333
10334 if (!core_kernel_text((unsigned long)addr)) {
10335 - pages[0] = vmalloc_to_page(addr);
10336 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10337 + pages[0] = vmalloc_to_page(vaddr);
10338 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10339 } else {
10340 - pages[0] = virt_to_page(addr);
10341 + pages[0] = virt_to_page(vaddr);
10342 WARN_ON(!PageReserved(pages[0]));
10343 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10344 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10345 }
10346 BUG_ON(!pages[0]);
10347 - local_irq_save(flags);
10348 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10349 - if (pages[1])
10350 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10351 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10352 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10353 - clear_fixmap(FIX_TEXT_POKE0);
10354 - if (pages[1])
10355 - clear_fixmap(FIX_TEXT_POKE1);
10356 - local_flush_tlb();
10357 - sync_core();
10358 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10359 - that causes hangs on some VIA CPUs. */
10360 + text_poke_early(addr, opcode, len);
10361 for (i = 0; i < len; i++)
10362 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10363 - local_irq_restore(flags);
10364 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10365 return addr;
10366 }
10367
10368 diff -urNp linux-3.0.3/arch/x86/kernel/apic/apic.c linux-3.0.3/arch/x86/kernel/apic/apic.c
10369 --- linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10370 +++ linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10371 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10372 /*
10373 * Debug level, exported for io_apic.c
10374 */
10375 -unsigned int apic_verbosity;
10376 +int apic_verbosity;
10377
10378 int pic_mode;
10379
10380 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10381 apic_write(APIC_ESR, 0);
10382 v1 = apic_read(APIC_ESR);
10383 ack_APIC_irq();
10384 - atomic_inc(&irq_err_count);
10385 + atomic_inc_unchecked(&irq_err_count);
10386
10387 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10388 smp_processor_id(), v0 , v1);
10389 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10390 u16 *bios_cpu_apicid;
10391 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10392
10393 + pax_track_stack();
10394 +
10395 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10396 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10397
10398 diff -urNp linux-3.0.3/arch/x86/kernel/apic/io_apic.c linux-3.0.3/arch/x86/kernel/apic/io_apic.c
10399 --- linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10400 +++ linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10401 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10402 }
10403 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10404
10405 -void lock_vector_lock(void)
10406 +void lock_vector_lock(void) __acquires(vector_lock)
10407 {
10408 /* Used to the online set of cpus does not change
10409 * during assign_irq_vector.
10410 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10411 raw_spin_lock(&vector_lock);
10412 }
10413
10414 -void unlock_vector_lock(void)
10415 +void unlock_vector_lock(void) __releases(vector_lock)
10416 {
10417 raw_spin_unlock(&vector_lock);
10418 }
10419 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10420 ack_APIC_irq();
10421 }
10422
10423 -atomic_t irq_mis_count;
10424 +atomic_unchecked_t irq_mis_count;
10425
10426 /*
10427 * IO-APIC versions below 0x20 don't support EOI register.
10428 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10429 * at the cpu.
10430 */
10431 if (!(v & (1 << (i & 0x1f)))) {
10432 - atomic_inc(&irq_mis_count);
10433 + atomic_inc_unchecked(&irq_mis_count);
10434
10435 eoi_ioapic_irq(irq, cfg);
10436 }
10437 diff -urNp linux-3.0.3/arch/x86/kernel/apm_32.c linux-3.0.3/arch/x86/kernel/apm_32.c
10438 --- linux-3.0.3/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10439 +++ linux-3.0.3/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10440 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10441 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10442 * even though they are called in protected mode.
10443 */
10444 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10445 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10446 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10447
10448 static const char driver_version[] = "1.16ac"; /* no spaces */
10449 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10450 BUG_ON(cpu != 0);
10451 gdt = get_cpu_gdt_table(cpu);
10452 save_desc_40 = gdt[0x40 / 8];
10453 +
10454 + pax_open_kernel();
10455 gdt[0x40 / 8] = bad_bios_desc;
10456 + pax_close_kernel();
10457
10458 apm_irq_save(flags);
10459 APM_DO_SAVE_SEGS;
10460 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10461 &call->esi);
10462 APM_DO_RESTORE_SEGS;
10463 apm_irq_restore(flags);
10464 +
10465 + pax_open_kernel();
10466 gdt[0x40 / 8] = save_desc_40;
10467 + pax_close_kernel();
10468 +
10469 put_cpu();
10470
10471 return call->eax & 0xff;
10472 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10473 BUG_ON(cpu != 0);
10474 gdt = get_cpu_gdt_table(cpu);
10475 save_desc_40 = gdt[0x40 / 8];
10476 +
10477 + pax_open_kernel();
10478 gdt[0x40 / 8] = bad_bios_desc;
10479 + pax_close_kernel();
10480
10481 apm_irq_save(flags);
10482 APM_DO_SAVE_SEGS;
10483 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10484 &call->eax);
10485 APM_DO_RESTORE_SEGS;
10486 apm_irq_restore(flags);
10487 +
10488 + pax_open_kernel();
10489 gdt[0x40 / 8] = save_desc_40;
10490 + pax_close_kernel();
10491 +
10492 put_cpu();
10493 return error;
10494 }
10495 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10496 * code to that CPU.
10497 */
10498 gdt = get_cpu_gdt_table(0);
10499 +
10500 + pax_open_kernel();
10501 set_desc_base(&gdt[APM_CS >> 3],
10502 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10503 set_desc_base(&gdt[APM_CS_16 >> 3],
10504 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10505 set_desc_base(&gdt[APM_DS >> 3],
10506 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10507 + pax_close_kernel();
10508
10509 proc_create("apm", 0, NULL, &apm_file_ops);
10510
10511 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets_64.c linux-3.0.3/arch/x86/kernel/asm-offsets_64.c
10512 --- linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10513 +++ linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10514 @@ -69,6 +69,7 @@ int main(void)
10515 BLANK();
10516 #undef ENTRY
10517
10518 + DEFINE(TSS_size, sizeof(struct tss_struct));
10519 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10520 BLANK();
10521
10522 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets.c linux-3.0.3/arch/x86/kernel/asm-offsets.c
10523 --- linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10524 +++ linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10525 @@ -33,6 +33,8 @@ void common(void) {
10526 OFFSET(TI_status, thread_info, status);
10527 OFFSET(TI_addr_limit, thread_info, addr_limit);
10528 OFFSET(TI_preempt_count, thread_info, preempt_count);
10529 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10530 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10531
10532 BLANK();
10533 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10534 @@ -53,8 +55,26 @@ void common(void) {
10535 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10536 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10537 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10538 +
10539 +#ifdef CONFIG_PAX_KERNEXEC
10540 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10541 +#endif
10542 +
10543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10544 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10545 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10546 +#ifdef CONFIG_X86_64
10547 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10548 +#endif
10549 #endif
10550
10551 +#endif
10552 +
10553 + BLANK();
10554 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10555 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10556 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10557 +
10558 #ifdef CONFIG_XEN
10559 BLANK();
10560 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10561 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/amd.c linux-3.0.3/arch/x86/kernel/cpu/amd.c
10562 --- linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10563 +++ linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10564 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10565 unsigned int size)
10566 {
10567 /* AMD errata T13 (order #21922) */
10568 - if ((c->x86 == 6)) {
10569 + if (c->x86 == 6) {
10570 /* Duron Rev A0 */
10571 if (c->x86_model == 3 && c->x86_mask == 0)
10572 size = 64;
10573 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/common.c linux-3.0.3/arch/x86/kernel/cpu/common.c
10574 --- linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10575 +++ linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10576 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10577
10578 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10579
10580 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10581 -#ifdef CONFIG_X86_64
10582 - /*
10583 - * We need valid kernel segments for data and code in long mode too
10584 - * IRET will check the segment types kkeil 2000/10/28
10585 - * Also sysret mandates a special GDT layout
10586 - *
10587 - * TLS descriptors are currently at a different place compared to i386.
10588 - * Hopefully nobody expects them at a fixed place (Wine?)
10589 - */
10590 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10591 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10592 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10593 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10594 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10595 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10596 -#else
10597 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10598 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10599 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10600 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10601 - /*
10602 - * Segments used for calling PnP BIOS have byte granularity.
10603 - * They code segments and data segments have fixed 64k limits,
10604 - * the transfer segment sizes are set at run time.
10605 - */
10606 - /* 32-bit code */
10607 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10608 - /* 16-bit code */
10609 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10610 - /* 16-bit data */
10611 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10612 - /* 16-bit data */
10613 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10614 - /* 16-bit data */
10615 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10616 - /*
10617 - * The APM segments have byte granularity and their bases
10618 - * are set at run time. All have 64k limits.
10619 - */
10620 - /* 32-bit code */
10621 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10622 - /* 16-bit code */
10623 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10624 - /* data */
10625 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10626 -
10627 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10629 - GDT_STACK_CANARY_INIT
10630 -#endif
10631 -} };
10632 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10633 -
10634 static int __init x86_xsave_setup(char *s)
10635 {
10636 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10637 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10638 {
10639 struct desc_ptr gdt_descr;
10640
10641 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10642 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10643 gdt_descr.size = GDT_SIZE - 1;
10644 load_gdt(&gdt_descr);
10645 /* Reload the per-cpu base */
10646 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10647 /* Filter out anything that depends on CPUID levels we don't have */
10648 filter_cpuid_features(c, true);
10649
10650 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10651 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10652 +#endif
10653 +
10654 /* If the model name is still unset, do table lookup. */
10655 if (!c->x86_model_id[0]) {
10656 const char *p;
10657 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10658 }
10659 __setup("clearcpuid=", setup_disablecpuid);
10660
10661 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10662 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10663 +
10664 #ifdef CONFIG_X86_64
10665 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10666
10667 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10668 EXPORT_PER_CPU_SYMBOL(current_task);
10669
10670 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10671 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10672 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10673 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10674
10675 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10676 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10677 {
10678 memset(regs, 0, sizeof(struct pt_regs));
10679 regs->fs = __KERNEL_PERCPU;
10680 - regs->gs = __KERNEL_STACK_CANARY;
10681 + savesegment(gs, regs->gs);
10682
10683 return regs;
10684 }
10685 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10686 int i;
10687
10688 cpu = stack_smp_processor_id();
10689 - t = &per_cpu(init_tss, cpu);
10690 + t = init_tss + cpu;
10691 oist = &per_cpu(orig_ist, cpu);
10692
10693 #ifdef CONFIG_NUMA
10694 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10695 switch_to_new_gdt(cpu);
10696 loadsegment(fs, 0);
10697
10698 - load_idt((const struct desc_ptr *)&idt_descr);
10699 + load_idt(&idt_descr);
10700
10701 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10702 syscall_init();
10703 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10704 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10705 barrier();
10706
10707 - x86_configure_nx();
10708 if (cpu != 0)
10709 enable_x2apic();
10710
10711 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10712 {
10713 int cpu = smp_processor_id();
10714 struct task_struct *curr = current;
10715 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10716 + struct tss_struct *t = init_tss + cpu;
10717 struct thread_struct *thread = &curr->thread;
10718
10719 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10720 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/intel.c linux-3.0.3/arch/x86/kernel/cpu/intel.c
10721 --- linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:44:40.000000000 -0400
10722 +++ linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:47:55.000000000 -0400
10723 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10724 * Update the IDT descriptor and reload the IDT so that
10725 * it uses the read-only mapped virtual address.
10726 */
10727 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10728 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10729 load_idt(&idt_descr);
10730 }
10731 #endif
10732 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/Makefile linux-3.0.3/arch/x86/kernel/cpu/Makefile
10733 --- linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10734 +++ linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10735 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10736 CFLAGS_REMOVE_perf_event.o = -pg
10737 endif
10738
10739 -# Make sure load_percpu_segment has no stackprotector
10740 -nostackp := $(call cc-option, -fno-stack-protector)
10741 -CFLAGS_common.o := $(nostackp)
10742 -
10743 obj-y := intel_cacheinfo.o scattered.o topology.o
10744 obj-y += proc.o capflags.o powerflags.o common.o
10745 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10746 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c
10747 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10748 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10749 @@ -46,6 +46,7 @@
10750 #include <asm/ipi.h>
10751 #include <asm/mce.h>
10752 #include <asm/msr.h>
10753 +#include <asm/local.h>
10754
10755 #include "mce-internal.h"
10756
10757 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10758 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10759 m->cs, m->ip);
10760
10761 - if (m->cs == __KERNEL_CS)
10762 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10763 print_symbol("{%s}", m->ip);
10764 pr_cont("\n");
10765 }
10766 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10767
10768 #define PANIC_TIMEOUT 5 /* 5 seconds */
10769
10770 -static atomic_t mce_paniced;
10771 +static atomic_unchecked_t mce_paniced;
10772
10773 static int fake_panic;
10774 -static atomic_t mce_fake_paniced;
10775 +static atomic_unchecked_t mce_fake_paniced;
10776
10777 /* Panic in progress. Enable interrupts and wait for final IPI */
10778 static void wait_for_panic(void)
10779 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10780 /*
10781 * Make sure only one CPU runs in machine check panic
10782 */
10783 - if (atomic_inc_return(&mce_paniced) > 1)
10784 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10785 wait_for_panic();
10786 barrier();
10787
10788 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10789 console_verbose();
10790 } else {
10791 /* Don't log too much for fake panic */
10792 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10793 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10794 return;
10795 }
10796 /* First print corrected ones that are still unlogged */
10797 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10798 * might have been modified by someone else.
10799 */
10800 rmb();
10801 - if (atomic_read(&mce_paniced))
10802 + if (atomic_read_unchecked(&mce_paniced))
10803 wait_for_panic();
10804 if (!monarch_timeout)
10805 goto out;
10806 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10807 */
10808
10809 static DEFINE_SPINLOCK(mce_state_lock);
10810 -static int open_count; /* #times opened */
10811 +static local_t open_count; /* #times opened */
10812 static int open_exclu; /* already open exclusive? */
10813
10814 static int mce_open(struct inode *inode, struct file *file)
10815 {
10816 spin_lock(&mce_state_lock);
10817
10818 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10819 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10820 spin_unlock(&mce_state_lock);
10821
10822 return -EBUSY;
10823 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10824
10825 if (file->f_flags & O_EXCL)
10826 open_exclu = 1;
10827 - open_count++;
10828 + local_inc(&open_count);
10829
10830 spin_unlock(&mce_state_lock);
10831
10832 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10833 {
10834 spin_lock(&mce_state_lock);
10835
10836 - open_count--;
10837 + local_dec(&open_count);
10838 open_exclu = 0;
10839
10840 spin_unlock(&mce_state_lock);
10841 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10842 static void mce_reset(void)
10843 {
10844 cpu_missing = 0;
10845 - atomic_set(&mce_fake_paniced, 0);
10846 + atomic_set_unchecked(&mce_fake_paniced, 0);
10847 atomic_set(&mce_executing, 0);
10848 atomic_set(&mce_callin, 0);
10849 atomic_set(&global_nwo, 0);
10850 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c
10851 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10852 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10853 @@ -215,7 +215,9 @@ static int inject_init(void)
10854 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10855 return -ENOMEM;
10856 printk(KERN_INFO "Machine check injector initialized\n");
10857 - mce_chrdev_ops.write = mce_write;
10858 + pax_open_kernel();
10859 + *(void **)&mce_chrdev_ops.write = mce_write;
10860 + pax_close_kernel();
10861 register_die_notifier(&mce_raise_nb);
10862 return 0;
10863 }
10864 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c
10865 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-07-21 22:17:23.000000000 -0400
10866 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-08-23 21:47:55.000000000 -0400
10867 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10868 u64 size_or_mask, size_and_mask;
10869 static bool mtrr_aps_delayed_init;
10870
10871 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10872 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10873
10874 const struct mtrr_ops *mtrr_if;
10875
10876 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h
10877 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10878 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-23 21:47:55.000000000 -0400
10879 @@ -12,8 +12,8 @@
10880 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10881
10882 struct mtrr_ops {
10883 - u32 vendor;
10884 - u32 use_intel_if;
10885 + const u32 vendor;
10886 + const u32 use_intel_if;
10887 void (*set)(unsigned int reg, unsigned long base,
10888 unsigned long size, mtrr_type type);
10889 void (*set_all)(void);
10890 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/perf_event.c linux-3.0.3/arch/x86/kernel/cpu/perf_event.c
10891 --- linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10892 +++ linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10893 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10894 int i, j, w, wmax, num = 0;
10895 struct hw_perf_event *hwc;
10896
10897 + pax_track_stack();
10898 +
10899 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10900
10901 for (i = 0; i < n; i++) {
10902 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10903 break;
10904
10905 perf_callchain_store(entry, frame.return_address);
10906 - fp = frame.next_frame;
10907 + fp = (__force const void __user *)frame.next_frame;
10908 }
10909 }
10910
10911 diff -urNp linux-3.0.3/arch/x86/kernel/crash.c linux-3.0.3/arch/x86/kernel/crash.c
10912 --- linux-3.0.3/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10913 +++ linux-3.0.3/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10914 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10915 regs = args->regs;
10916
10917 #ifdef CONFIG_X86_32
10918 - if (!user_mode_vm(regs)) {
10919 + if (!user_mode(regs)) {
10920 crash_fixup_ss_esp(&fixed_regs, regs);
10921 regs = &fixed_regs;
10922 }
10923 diff -urNp linux-3.0.3/arch/x86/kernel/doublefault_32.c linux-3.0.3/arch/x86/kernel/doublefault_32.c
10924 --- linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10925 +++ linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10926 @@ -11,7 +11,7 @@
10927
10928 #define DOUBLEFAULT_STACKSIZE (1024)
10929 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10930 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10931 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10932
10933 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10934
10935 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10936 unsigned long gdt, tss;
10937
10938 store_gdt(&gdt_desc);
10939 - gdt = gdt_desc.address;
10940 + gdt = (unsigned long)gdt_desc.address;
10941
10942 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10943
10944 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10945 /* 0x2 bit is always set */
10946 .flags = X86_EFLAGS_SF | 0x2,
10947 .sp = STACK_START,
10948 - .es = __USER_DS,
10949 + .es = __KERNEL_DS,
10950 .cs = __KERNEL_CS,
10951 .ss = __KERNEL_DS,
10952 - .ds = __USER_DS,
10953 + .ds = __KERNEL_DS,
10954 .fs = __KERNEL_PERCPU,
10955
10956 .__cr3 = __pa_nodebug(swapper_pg_dir),
10957 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_32.c linux-3.0.3/arch/x86/kernel/dumpstack_32.c
10958 --- linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10959 +++ linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10960 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10961 bp = stack_frame(task, regs);
10962
10963 for (;;) {
10964 - struct thread_info *context;
10965 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10966
10967 - context = (struct thread_info *)
10968 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10969 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10970 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10971
10972 - stack = (unsigned long *)context->previous_esp;
10973 - if (!stack)
10974 + if (stack_start == task_stack_page(task))
10975 break;
10976 + stack = *(unsigned long **)stack_start;
10977 if (ops->stack(data, "IRQ") < 0)
10978 break;
10979 touch_nmi_watchdog();
10980 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10981 * When in-kernel, we also print out the stack and code at the
10982 * time of the fault..
10983 */
10984 - if (!user_mode_vm(regs)) {
10985 + if (!user_mode(regs)) {
10986 unsigned int code_prologue = code_bytes * 43 / 64;
10987 unsigned int code_len = code_bytes;
10988 unsigned char c;
10989 u8 *ip;
10990 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10991
10992 printk(KERN_EMERG "Stack:\n");
10993 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10994
10995 printk(KERN_EMERG "Code: ");
10996
10997 - ip = (u8 *)regs->ip - code_prologue;
10998 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10999 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11000 /* try starting at IP */
11001 - ip = (u8 *)regs->ip;
11002 + ip = (u8 *)regs->ip + cs_base;
11003 code_len = code_len - code_prologue + 1;
11004 }
11005 for (i = 0; i < code_len; i++, ip++) {
11006 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11007 printk(" Bad EIP value.");
11008 break;
11009 }
11010 - if (ip == (u8 *)regs->ip)
11011 + if (ip == (u8 *)regs->ip + cs_base)
11012 printk("<%02x> ", c);
11013 else
11014 printk("%02x ", c);
11015 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11016 {
11017 unsigned short ud2;
11018
11019 + ip = ktla_ktva(ip);
11020 if (ip < PAGE_OFFSET)
11021 return 0;
11022 if (probe_kernel_address((unsigned short *)ip, ud2))
11023 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_64.c linux-3.0.3/arch/x86/kernel/dumpstack_64.c
11024 --- linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11025 +++ linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11026 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11027 unsigned long *irq_stack_end =
11028 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11029 unsigned used = 0;
11030 - struct thread_info *tinfo;
11031 int graph = 0;
11032 unsigned long dummy;
11033 + void *stack_start;
11034
11035 if (!task)
11036 task = current;
11037 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11038 * current stack address. If the stacks consist of nested
11039 * exceptions
11040 */
11041 - tinfo = task_thread_info(task);
11042 for (;;) {
11043 char *id;
11044 unsigned long *estack_end;
11045 +
11046 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11047 &used, &id);
11048
11049 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11050 if (ops->stack(data, id) < 0)
11051 break;
11052
11053 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11054 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11055 data, estack_end, &graph);
11056 ops->stack(data, "<EOE>");
11057 /*
11058 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11059 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11060 if (ops->stack(data, "IRQ") < 0)
11061 break;
11062 - bp = ops->walk_stack(tinfo, stack, bp,
11063 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11064 ops, data, irq_stack_end, &graph);
11065 /*
11066 * We link to the next stack (which would be
11067 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11068 /*
11069 * This handles the process stack:
11070 */
11071 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11072 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11073 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11074 put_cpu();
11075 }
11076 EXPORT_SYMBOL(dump_trace);
11077 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack.c linux-3.0.3/arch/x86/kernel/dumpstack.c
11078 --- linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11079 +++ linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11080 @@ -2,6 +2,9 @@
11081 * Copyright (C) 1991, 1992 Linus Torvalds
11082 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11083 */
11084 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11085 +#define __INCLUDED_BY_HIDESYM 1
11086 +#endif
11087 #include <linux/kallsyms.h>
11088 #include <linux/kprobes.h>
11089 #include <linux/uaccess.h>
11090 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11091 static void
11092 print_ftrace_graph_addr(unsigned long addr, void *data,
11093 const struct stacktrace_ops *ops,
11094 - struct thread_info *tinfo, int *graph)
11095 + struct task_struct *task, int *graph)
11096 {
11097 - struct task_struct *task = tinfo->task;
11098 unsigned long ret_addr;
11099 int index = task->curr_ret_stack;
11100
11101 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11102 static inline void
11103 print_ftrace_graph_addr(unsigned long addr, void *data,
11104 const struct stacktrace_ops *ops,
11105 - struct thread_info *tinfo, int *graph)
11106 + struct task_struct *task, int *graph)
11107 { }
11108 #endif
11109
11110 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11111 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11112 */
11113
11114 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11115 - void *p, unsigned int size, void *end)
11116 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11117 {
11118 - void *t = tinfo;
11119 if (end) {
11120 if (p < end && p >= (end-THREAD_SIZE))
11121 return 1;
11122 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11123 }
11124
11125 unsigned long
11126 -print_context_stack(struct thread_info *tinfo,
11127 +print_context_stack(struct task_struct *task, void *stack_start,
11128 unsigned long *stack, unsigned long bp,
11129 const struct stacktrace_ops *ops, void *data,
11130 unsigned long *end, int *graph)
11131 {
11132 struct stack_frame *frame = (struct stack_frame *)bp;
11133
11134 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11135 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11136 unsigned long addr;
11137
11138 addr = *stack;
11139 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11140 } else {
11141 ops->address(data, addr, 0);
11142 }
11143 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11144 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11145 }
11146 stack++;
11147 }
11148 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11149 EXPORT_SYMBOL_GPL(print_context_stack);
11150
11151 unsigned long
11152 -print_context_stack_bp(struct thread_info *tinfo,
11153 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11154 unsigned long *stack, unsigned long bp,
11155 const struct stacktrace_ops *ops, void *data,
11156 unsigned long *end, int *graph)
11157 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11158 struct stack_frame *frame = (struct stack_frame *)bp;
11159 unsigned long *ret_addr = &frame->return_address;
11160
11161 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11162 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11163 unsigned long addr = *ret_addr;
11164
11165 if (!__kernel_text_address(addr))
11166 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11167 ops->address(data, addr, 1);
11168 frame = frame->next_frame;
11169 ret_addr = &frame->return_address;
11170 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11171 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11172 }
11173
11174 return (unsigned long)frame;
11175 @@ -186,7 +186,7 @@ void dump_stack(void)
11176
11177 bp = stack_frame(current, NULL);
11178 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11179 - current->pid, current->comm, print_tainted(),
11180 + task_pid_nr(current), current->comm, print_tainted(),
11181 init_utsname()->release,
11182 (int)strcspn(init_utsname()->version, " "),
11183 init_utsname()->version);
11184 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11185 }
11186 EXPORT_SYMBOL_GPL(oops_begin);
11187
11188 +extern void gr_handle_kernel_exploit(void);
11189 +
11190 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11191 {
11192 if (regs && kexec_should_crash(current))
11193 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11194 panic("Fatal exception in interrupt");
11195 if (panic_on_oops)
11196 panic("Fatal exception");
11197 - do_exit(signr);
11198 +
11199 + gr_handle_kernel_exploit();
11200 +
11201 + do_group_exit(signr);
11202 }
11203
11204 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11205 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11206
11207 show_registers(regs);
11208 #ifdef CONFIG_X86_32
11209 - if (user_mode_vm(regs)) {
11210 + if (user_mode(regs)) {
11211 sp = regs->sp;
11212 ss = regs->ss & 0xffff;
11213 } else {
11214 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11215 unsigned long flags = oops_begin();
11216 int sig = SIGSEGV;
11217
11218 - if (!user_mode_vm(regs))
11219 + if (!user_mode(regs))
11220 report_bug(regs->ip, regs);
11221
11222 if (__die(str, regs, err))
11223 diff -urNp linux-3.0.3/arch/x86/kernel/early_printk.c linux-3.0.3/arch/x86/kernel/early_printk.c
11224 --- linux-3.0.3/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11225 +++ linux-3.0.3/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11226 @@ -7,6 +7,7 @@
11227 #include <linux/pci_regs.h>
11228 #include <linux/pci_ids.h>
11229 #include <linux/errno.h>
11230 +#include <linux/sched.h>
11231 #include <asm/io.h>
11232 #include <asm/processor.h>
11233 #include <asm/fcntl.h>
11234 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11235 int n;
11236 va_list ap;
11237
11238 + pax_track_stack();
11239 +
11240 va_start(ap, fmt);
11241 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11242 early_console->write(early_console, buf, n);
11243 diff -urNp linux-3.0.3/arch/x86/kernel/entry_32.S linux-3.0.3/arch/x86/kernel/entry_32.S
11244 --- linux-3.0.3/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11245 +++ linux-3.0.3/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11246 @@ -185,13 +185,146 @@
11247 /*CFI_REL_OFFSET gs, PT_GS*/
11248 .endm
11249 .macro SET_KERNEL_GS reg
11250 +
11251 +#ifdef CONFIG_CC_STACKPROTECTOR
11252 movl $(__KERNEL_STACK_CANARY), \reg
11253 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11254 + movl $(__USER_DS), \reg
11255 +#else
11256 + xorl \reg, \reg
11257 +#endif
11258 +
11259 movl \reg, %gs
11260 .endm
11261
11262 #endif /* CONFIG_X86_32_LAZY_GS */
11263
11264 -.macro SAVE_ALL
11265 +.macro pax_enter_kernel
11266 +#ifdef CONFIG_PAX_KERNEXEC
11267 + call pax_enter_kernel
11268 +#endif
11269 +.endm
11270 +
11271 +.macro pax_exit_kernel
11272 +#ifdef CONFIG_PAX_KERNEXEC
11273 + call pax_exit_kernel
11274 +#endif
11275 +.endm
11276 +
11277 +#ifdef CONFIG_PAX_KERNEXEC
11278 +ENTRY(pax_enter_kernel)
11279 +#ifdef CONFIG_PARAVIRT
11280 + pushl %eax
11281 + pushl %ecx
11282 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11283 + mov %eax, %esi
11284 +#else
11285 + mov %cr0, %esi
11286 +#endif
11287 + bts $16, %esi
11288 + jnc 1f
11289 + mov %cs, %esi
11290 + cmp $__KERNEL_CS, %esi
11291 + jz 3f
11292 + ljmp $__KERNEL_CS, $3f
11293 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11294 +2:
11295 +#ifdef CONFIG_PARAVIRT
11296 + mov %esi, %eax
11297 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11298 +#else
11299 + mov %esi, %cr0
11300 +#endif
11301 +3:
11302 +#ifdef CONFIG_PARAVIRT
11303 + popl %ecx
11304 + popl %eax
11305 +#endif
11306 + ret
11307 +ENDPROC(pax_enter_kernel)
11308 +
11309 +ENTRY(pax_exit_kernel)
11310 +#ifdef CONFIG_PARAVIRT
11311 + pushl %eax
11312 + pushl %ecx
11313 +#endif
11314 + mov %cs, %esi
11315 + cmp $__KERNEXEC_KERNEL_CS, %esi
11316 + jnz 2f
11317 +#ifdef CONFIG_PARAVIRT
11318 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11319 + mov %eax, %esi
11320 +#else
11321 + mov %cr0, %esi
11322 +#endif
11323 + btr $16, %esi
11324 + ljmp $__KERNEL_CS, $1f
11325 +1:
11326 +#ifdef CONFIG_PARAVIRT
11327 + mov %esi, %eax
11328 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11329 +#else
11330 + mov %esi, %cr0
11331 +#endif
11332 +2:
11333 +#ifdef CONFIG_PARAVIRT
11334 + popl %ecx
11335 + popl %eax
11336 +#endif
11337 + ret
11338 +ENDPROC(pax_exit_kernel)
11339 +#endif
11340 +
11341 +.macro pax_erase_kstack
11342 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11343 + call pax_erase_kstack
11344 +#endif
11345 +.endm
11346 +
11347 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11348 +/*
11349 + * ebp: thread_info
11350 + * ecx, edx: can be clobbered
11351 + */
11352 +ENTRY(pax_erase_kstack)
11353 + pushl %edi
11354 + pushl %eax
11355 +
11356 + mov TI_lowest_stack(%ebp), %edi
11357 + mov $-0xBEEF, %eax
11358 + std
11359 +
11360 +1: mov %edi, %ecx
11361 + and $THREAD_SIZE_asm - 1, %ecx
11362 + shr $2, %ecx
11363 + repne scasl
11364 + jecxz 2f
11365 +
11366 + cmp $2*16, %ecx
11367 + jc 2f
11368 +
11369 + mov $2*16, %ecx
11370 + repe scasl
11371 + jecxz 2f
11372 + jne 1b
11373 +
11374 +2: cld
11375 + mov %esp, %ecx
11376 + sub %edi, %ecx
11377 + shr $2, %ecx
11378 + rep stosl
11379 +
11380 + mov TI_task_thread_sp0(%ebp), %edi
11381 + sub $128, %edi
11382 + mov %edi, TI_lowest_stack(%ebp)
11383 +
11384 + popl %eax
11385 + popl %edi
11386 + ret
11387 +ENDPROC(pax_erase_kstack)
11388 +#endif
11389 +
11390 +.macro __SAVE_ALL _DS
11391 cld
11392 PUSH_GS
11393 pushl_cfi %fs
11394 @@ -214,7 +347,7 @@
11395 CFI_REL_OFFSET ecx, 0
11396 pushl_cfi %ebx
11397 CFI_REL_OFFSET ebx, 0
11398 - movl $(__USER_DS), %edx
11399 + movl $\_DS, %edx
11400 movl %edx, %ds
11401 movl %edx, %es
11402 movl $(__KERNEL_PERCPU), %edx
11403 @@ -222,6 +355,15 @@
11404 SET_KERNEL_GS %edx
11405 .endm
11406
11407 +.macro SAVE_ALL
11408 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11409 + __SAVE_ALL __KERNEL_DS
11410 + pax_enter_kernel
11411 +#else
11412 + __SAVE_ALL __USER_DS
11413 +#endif
11414 +.endm
11415 +
11416 .macro RESTORE_INT_REGS
11417 popl_cfi %ebx
11418 CFI_RESTORE ebx
11419 @@ -332,7 +474,15 @@ check_userspace:
11420 movb PT_CS(%esp), %al
11421 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11422 cmpl $USER_RPL, %eax
11423 +
11424 +#ifdef CONFIG_PAX_KERNEXEC
11425 + jae resume_userspace
11426 +
11427 + PAX_EXIT_KERNEL
11428 + jmp resume_kernel
11429 +#else
11430 jb resume_kernel # not returning to v8086 or userspace
11431 +#endif
11432
11433 ENTRY(resume_userspace)
11434 LOCKDEP_SYS_EXIT
11435 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11436 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11437 # int/exception return?
11438 jne work_pending
11439 - jmp restore_all
11440 + jmp restore_all_pax
11441 END(ret_from_exception)
11442
11443 #ifdef CONFIG_PREEMPT
11444 @@ -394,23 +544,34 @@ sysenter_past_esp:
11445 /*CFI_REL_OFFSET cs, 0*/
11446 /*
11447 * Push current_thread_info()->sysenter_return to the stack.
11448 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11449 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11450 */
11451 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11452 + pushl_cfi $0
11453 CFI_REL_OFFSET eip, 0
11454
11455 pushl_cfi %eax
11456 SAVE_ALL
11457 + GET_THREAD_INFO(%ebp)
11458 + movl TI_sysenter_return(%ebp),%ebp
11459 + movl %ebp,PT_EIP(%esp)
11460 ENABLE_INTERRUPTS(CLBR_NONE)
11461
11462 /*
11463 * Load the potential sixth argument from user stack.
11464 * Careful about security.
11465 */
11466 + movl PT_OLDESP(%esp),%ebp
11467 +
11468 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11469 + mov PT_OLDSS(%esp),%ds
11470 +1: movl %ds:(%ebp),%ebp
11471 + push %ss
11472 + pop %ds
11473 +#else
11474 cmpl $__PAGE_OFFSET-3,%ebp
11475 jae syscall_fault
11476 1: movl (%ebp),%ebp
11477 +#endif
11478 +
11479 movl %ebp,PT_EBP(%esp)
11480 .section __ex_table,"a"
11481 .align 4
11482 @@ -433,12 +594,23 @@ sysenter_do_call:
11483 testl $_TIF_ALLWORK_MASK, %ecx
11484 jne sysexit_audit
11485 sysenter_exit:
11486 +
11487 +#ifdef CONFIG_PAX_RANDKSTACK
11488 + pushl_cfi %eax
11489 + call pax_randomize_kstack
11490 + popl_cfi %eax
11491 +#endif
11492 +
11493 + pax_erase_kstack
11494 +
11495 /* if something modifies registers it must also disable sysexit */
11496 movl PT_EIP(%esp), %edx
11497 movl PT_OLDESP(%esp), %ecx
11498 xorl %ebp,%ebp
11499 TRACE_IRQS_ON
11500 1: mov PT_FS(%esp), %fs
11501 +2: mov PT_DS(%esp), %ds
11502 +3: mov PT_ES(%esp), %es
11503 PTGS_TO_GS
11504 ENABLE_INTERRUPTS_SYSEXIT
11505
11506 @@ -455,6 +627,9 @@ sysenter_audit:
11507 movl %eax,%edx /* 2nd arg: syscall number */
11508 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11509 call audit_syscall_entry
11510 +
11511 + pax_erase_kstack
11512 +
11513 pushl_cfi %ebx
11514 movl PT_EAX(%esp),%eax /* reload syscall number */
11515 jmp sysenter_do_call
11516 @@ -481,11 +656,17 @@ sysexit_audit:
11517
11518 CFI_ENDPROC
11519 .pushsection .fixup,"ax"
11520 -2: movl $0,PT_FS(%esp)
11521 +4: movl $0,PT_FS(%esp)
11522 + jmp 1b
11523 +5: movl $0,PT_DS(%esp)
11524 + jmp 1b
11525 +6: movl $0,PT_ES(%esp)
11526 jmp 1b
11527 .section __ex_table,"a"
11528 .align 4
11529 - .long 1b,2b
11530 + .long 1b,4b
11531 + .long 2b,5b
11532 + .long 3b,6b
11533 .popsection
11534 PTGS_TO_GS_EX
11535 ENDPROC(ia32_sysenter_target)
11536 @@ -518,6 +699,14 @@ syscall_exit:
11537 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11538 jne syscall_exit_work
11539
11540 +restore_all_pax:
11541 +
11542 +#ifdef CONFIG_PAX_RANDKSTACK
11543 + call pax_randomize_kstack
11544 +#endif
11545 +
11546 + pax_erase_kstack
11547 +
11548 restore_all:
11549 TRACE_IRQS_IRET
11550 restore_all_notrace:
11551 @@ -577,14 +766,34 @@ ldt_ss:
11552 * compensating for the offset by changing to the ESPFIX segment with
11553 * a base address that matches for the difference.
11554 */
11555 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11556 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11557 mov %esp, %edx /* load kernel esp */
11558 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11559 mov %dx, %ax /* eax: new kernel esp */
11560 sub %eax, %edx /* offset (low word is 0) */
11561 +#ifdef CONFIG_SMP
11562 + movl PER_CPU_VAR(cpu_number), %ebx
11563 + shll $PAGE_SHIFT_asm, %ebx
11564 + addl $cpu_gdt_table, %ebx
11565 +#else
11566 + movl $cpu_gdt_table, %ebx
11567 +#endif
11568 shr $16, %edx
11569 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11570 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11571 +
11572 +#ifdef CONFIG_PAX_KERNEXEC
11573 + mov %cr0, %esi
11574 + btr $16, %esi
11575 + mov %esi, %cr0
11576 +#endif
11577 +
11578 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11579 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11580 +
11581 +#ifdef CONFIG_PAX_KERNEXEC
11582 + bts $16, %esi
11583 + mov %esi, %cr0
11584 +#endif
11585 +
11586 pushl_cfi $__ESPFIX_SS
11587 pushl_cfi %eax /* new kernel esp */
11588 /* Disable interrupts, but do not irqtrace this section: we
11589 @@ -613,29 +822,23 @@ work_resched:
11590 movl TI_flags(%ebp), %ecx
11591 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11592 # than syscall tracing?
11593 - jz restore_all
11594 + jz restore_all_pax
11595 testb $_TIF_NEED_RESCHED, %cl
11596 jnz work_resched
11597
11598 work_notifysig: # deal with pending signals and
11599 # notify-resume requests
11600 + movl %esp, %eax
11601 #ifdef CONFIG_VM86
11602 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11603 - movl %esp, %eax
11604 - jne work_notifysig_v86 # returning to kernel-space or
11605 + jz 1f # returning to kernel-space or
11606 # vm86-space
11607 - xorl %edx, %edx
11608 - call do_notify_resume
11609 - jmp resume_userspace_sig
11610
11611 - ALIGN
11612 -work_notifysig_v86:
11613 pushl_cfi %ecx # save ti_flags for do_notify_resume
11614 call save_v86_state # %eax contains pt_regs pointer
11615 popl_cfi %ecx
11616 movl %eax, %esp
11617 -#else
11618 - movl %esp, %eax
11619 +1:
11620 #endif
11621 xorl %edx, %edx
11622 call do_notify_resume
11623 @@ -648,6 +851,9 @@ syscall_trace_entry:
11624 movl $-ENOSYS,PT_EAX(%esp)
11625 movl %esp, %eax
11626 call syscall_trace_enter
11627 +
11628 + pax_erase_kstack
11629 +
11630 /* What it returned is what we'll actually use. */
11631 cmpl $(nr_syscalls), %eax
11632 jnae syscall_call
11633 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11634
11635 RING0_INT_FRAME # can't unwind into user space anyway
11636 syscall_fault:
11637 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11638 + push %ss
11639 + pop %ds
11640 +#endif
11641 GET_THREAD_INFO(%ebp)
11642 movl $-EFAULT,PT_EAX(%esp)
11643 jmp resume_userspace
11644 @@ -752,6 +962,36 @@ ptregs_clone:
11645 CFI_ENDPROC
11646 ENDPROC(ptregs_clone)
11647
11648 + ALIGN;
11649 +ENTRY(kernel_execve)
11650 + CFI_STARTPROC
11651 + pushl_cfi %ebp
11652 + sub $PT_OLDSS+4,%esp
11653 + pushl_cfi %edi
11654 + pushl_cfi %ecx
11655 + pushl_cfi %eax
11656 + lea 3*4(%esp),%edi
11657 + mov $PT_OLDSS/4+1,%ecx
11658 + xorl %eax,%eax
11659 + rep stosl
11660 + popl_cfi %eax
11661 + popl_cfi %ecx
11662 + popl_cfi %edi
11663 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11664 + pushl_cfi %esp
11665 + call sys_execve
11666 + add $4,%esp
11667 + CFI_ADJUST_CFA_OFFSET -4
11668 + GET_THREAD_INFO(%ebp)
11669 + test %eax,%eax
11670 + jz syscall_exit
11671 + add $PT_OLDSS+4,%esp
11672 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11673 + popl_cfi %ebp
11674 + ret
11675 + CFI_ENDPROC
11676 +ENDPROC(kernel_execve)
11677 +
11678 .macro FIXUP_ESPFIX_STACK
11679 /*
11680 * Switch back for ESPFIX stack to the normal zerobased stack
11681 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11682 * normal stack and adjusts ESP with the matching offset.
11683 */
11684 /* fixup the stack */
11685 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11686 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11687 +#ifdef CONFIG_SMP
11688 + movl PER_CPU_VAR(cpu_number), %ebx
11689 + shll $PAGE_SHIFT_asm, %ebx
11690 + addl $cpu_gdt_table, %ebx
11691 +#else
11692 + movl $cpu_gdt_table, %ebx
11693 +#endif
11694 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11695 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11696 shl $16, %eax
11697 addl %esp, %eax /* the adjusted stack pointer */
11698 pushl_cfi $__KERNEL_DS
11699 @@ -1213,7 +1460,6 @@ return_to_handler:
11700 jmp *%ecx
11701 #endif
11702
11703 -.section .rodata,"a"
11704 #include "syscall_table_32.S"
11705
11706 syscall_table_size=(.-sys_call_table)
11707 @@ -1259,9 +1505,12 @@ error_code:
11708 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11709 REG_TO_PTGS %ecx
11710 SET_KERNEL_GS %ecx
11711 - movl $(__USER_DS), %ecx
11712 + movl $(__KERNEL_DS), %ecx
11713 movl %ecx, %ds
11714 movl %ecx, %es
11715 +
11716 + pax_enter_kernel
11717 +
11718 TRACE_IRQS_OFF
11719 movl %esp,%eax # pt_regs pointer
11720 call *%edi
11721 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11722 xorl %edx,%edx # zero error code
11723 movl %esp,%eax # pt_regs pointer
11724 call do_nmi
11725 +
11726 + pax_exit_kernel
11727 +
11728 jmp restore_all_notrace
11729 CFI_ENDPROC
11730
11731 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11732 FIXUP_ESPFIX_STACK # %eax == %esp
11733 xorl %edx,%edx # zero error code
11734 call do_nmi
11735 +
11736 + pax_exit_kernel
11737 +
11738 RESTORE_REGS
11739 lss 12+4(%esp), %esp # back to espfix stack
11740 CFI_ADJUST_CFA_OFFSET -24
11741 diff -urNp linux-3.0.3/arch/x86/kernel/entry_64.S linux-3.0.3/arch/x86/kernel/entry_64.S
11742 --- linux-3.0.3/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11743 +++ linux-3.0.3/arch/x86/kernel/entry_64.S 2011-08-23 21:48:14.000000000 -0400
11744 @@ -53,6 +53,7 @@
11745 #include <asm/paravirt.h>
11746 #include <asm/ftrace.h>
11747 #include <asm/percpu.h>
11748 +#include <asm/pgtable.h>
11749
11750 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11751 #include <linux/elf-em.h>
11752 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11753 ENDPROC(native_usergs_sysret64)
11754 #endif /* CONFIG_PARAVIRT */
11755
11756 + .macro ljmpq sel, off
11757 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11758 + .byte 0x48; ljmp *1234f(%rip)
11759 + .pushsection .rodata
11760 + .align 16
11761 + 1234: .quad \off; .word \sel
11762 + .popsection
11763 +#else
11764 + pushq $\sel
11765 + pushq $\off
11766 + lretq
11767 +#endif
11768 + .endm
11769 +
11770 + .macro pax_enter_kernel
11771 +#ifdef CONFIG_PAX_KERNEXEC
11772 + call pax_enter_kernel
11773 +#endif
11774 + .endm
11775 +
11776 + .macro pax_exit_kernel
11777 +#ifdef CONFIG_PAX_KERNEXEC
11778 + call pax_exit_kernel
11779 +#endif
11780 + .endm
11781 +
11782 +#ifdef CONFIG_PAX_KERNEXEC
11783 +ENTRY(pax_enter_kernel)
11784 + pushq %rdi
11785 +
11786 +#ifdef CONFIG_PARAVIRT
11787 + PV_SAVE_REGS(CLBR_RDI)
11788 +#endif
11789 +
11790 + GET_CR0_INTO_RDI
11791 + bts $16,%rdi
11792 + jnc 1f
11793 + mov %cs,%edi
11794 + cmp $__KERNEL_CS,%edi
11795 + jz 3f
11796 + ljmpq __KERNEL_CS,3f
11797 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11798 +2: SET_RDI_INTO_CR0
11799 +3:
11800 +
11801 +#ifdef CONFIG_PARAVIRT
11802 + PV_RESTORE_REGS(CLBR_RDI)
11803 +#endif
11804 +
11805 + popq %rdi
11806 + retq
11807 +ENDPROC(pax_enter_kernel)
11808 +
11809 +ENTRY(pax_exit_kernel)
11810 + pushq %rdi
11811 +
11812 +#ifdef CONFIG_PARAVIRT
11813 + PV_SAVE_REGS(CLBR_RDI)
11814 +#endif
11815 +
11816 + mov %cs,%rdi
11817 + cmp $__KERNEXEC_KERNEL_CS,%edi
11818 + jnz 2f
11819 + GET_CR0_INTO_RDI
11820 + btr $16,%rdi
11821 + ljmpq __KERNEL_CS,1f
11822 +1: SET_RDI_INTO_CR0
11823 +2:
11824 +
11825 +#ifdef CONFIG_PARAVIRT
11826 + PV_RESTORE_REGS(CLBR_RDI);
11827 +#endif
11828 +
11829 + popq %rdi
11830 + retq
11831 +ENDPROC(pax_exit_kernel)
11832 +#endif
11833 +
11834 + .macro pax_enter_kernel_user
11835 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11836 + call pax_enter_kernel_user
11837 +#endif
11838 + .endm
11839 +
11840 + .macro pax_exit_kernel_user
11841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11842 + call pax_exit_kernel_user
11843 +#endif
11844 +#ifdef CONFIG_PAX_RANDKSTACK
11845 + push %rax
11846 + call pax_randomize_kstack
11847 + pop %rax
11848 +#endif
11849 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11850 + call pax_erase_kstack
11851 +#endif
11852 + .endm
11853 +
11854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11855 +ENTRY(pax_enter_kernel_user)
11856 + pushq %rdi
11857 + pushq %rbx
11858 +
11859 +#ifdef CONFIG_PARAVIRT
11860 + PV_SAVE_REGS(CLBR_RDI)
11861 +#endif
11862 +
11863 + GET_CR3_INTO_RDI
11864 + mov %rdi,%rbx
11865 + add $__START_KERNEL_map,%rbx
11866 + sub phys_base(%rip),%rbx
11867 +
11868 +#ifdef CONFIG_PARAVIRT
11869 + pushq %rdi
11870 + cmpl $0, pv_info+PARAVIRT_enabled
11871 + jz 1f
11872 + i = 0
11873 + .rept USER_PGD_PTRS
11874 + mov i*8(%rbx),%rsi
11875 + mov $0,%sil
11876 + lea i*8(%rbx),%rdi
11877 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11878 + i = i + 1
11879 + .endr
11880 + jmp 2f
11881 +1:
11882 +#endif
11883 +
11884 + i = 0
11885 + .rept USER_PGD_PTRS
11886 + movb $0,i*8(%rbx)
11887 + i = i + 1
11888 + .endr
11889 +
11890 +#ifdef CONFIG_PARAVIRT
11891 +2: popq %rdi
11892 +#endif
11893 + SET_RDI_INTO_CR3
11894 +
11895 +#ifdef CONFIG_PAX_KERNEXEC
11896 + GET_CR0_INTO_RDI
11897 + bts $16,%rdi
11898 + SET_RDI_INTO_CR0
11899 +#endif
11900 +
11901 +#ifdef CONFIG_PARAVIRT
11902 + PV_RESTORE_REGS(CLBR_RDI)
11903 +#endif
11904 +
11905 + popq %rbx
11906 + popq %rdi
11907 + retq
11908 +ENDPROC(pax_enter_kernel_user)
11909 +
11910 +ENTRY(pax_exit_kernel_user)
11911 + push %rdi
11912 +
11913 +#ifdef CONFIG_PARAVIRT
11914 + pushq %rbx
11915 + PV_SAVE_REGS(CLBR_RDI)
11916 +#endif
11917 +
11918 +#ifdef CONFIG_PAX_KERNEXEC
11919 + GET_CR0_INTO_RDI
11920 + btr $16,%rdi
11921 + SET_RDI_INTO_CR0
11922 +#endif
11923 +
11924 + GET_CR3_INTO_RDI
11925 + add $__START_KERNEL_map,%rdi
11926 + sub phys_base(%rip),%rdi
11927 +
11928 +#ifdef CONFIG_PARAVIRT
11929 + cmpl $0, pv_info+PARAVIRT_enabled
11930 + jz 1f
11931 + mov %rdi,%rbx
11932 + i = 0
11933 + .rept USER_PGD_PTRS
11934 + mov i*8(%rbx),%rsi
11935 + mov $0x67,%sil
11936 + lea i*8(%rbx),%rdi
11937 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11938 + i = i + 1
11939 + .endr
11940 + jmp 2f
11941 +1:
11942 +#endif
11943 +
11944 + i = 0
11945 + .rept USER_PGD_PTRS
11946 + movb $0x67,i*8(%rdi)
11947 + i = i + 1
11948 + .endr
11949 +
11950 +#ifdef CONFIG_PARAVIRT
11951 +2: PV_RESTORE_REGS(CLBR_RDI)
11952 + popq %rbx
11953 +#endif
11954 +
11955 + popq %rdi
11956 + retq
11957 +ENDPROC(pax_exit_kernel_user)
11958 +#endif
11959 +
11960 + .macro pax_erase_kstack
11961 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11962 + call pax_erase_kstack
11963 +#endif
11964 + .endm
11965 +
11966 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11967 +/*
11968 + * r10: thread_info
11969 + * rcx, rdx: can be clobbered
11970 + */
11971 +ENTRY(pax_erase_kstack)
11972 + pushq %rdi
11973 + pushq %rax
11974 +
11975 + GET_THREAD_INFO(%r10)
11976 + mov TI_lowest_stack(%r10), %rdi
11977 + mov $-0xBEEF, %rax
11978 + std
11979 +
11980 +1: mov %edi, %ecx
11981 + and $THREAD_SIZE_asm - 1, %ecx
11982 + shr $3, %ecx
11983 + repne scasq
11984 + jecxz 2f
11985 +
11986 + cmp $2*8, %ecx
11987 + jc 2f
11988 +
11989 + mov $2*8, %ecx
11990 + repe scasq
11991 + jecxz 2f
11992 + jne 1b
11993 +
11994 +2: cld
11995 + mov %esp, %ecx
11996 + sub %edi, %ecx
11997 + shr $3, %ecx
11998 + rep stosq
11999 +
12000 + mov TI_task_thread_sp0(%r10), %rdi
12001 + sub $256, %rdi
12002 + mov %rdi, TI_lowest_stack(%r10)
12003 +
12004 + popq %rax
12005 + popq %rdi
12006 + ret
12007 +ENDPROC(pax_erase_kstack)
12008 +#endif
12009
12010 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12011 #ifdef CONFIG_TRACE_IRQFLAGS
12012 @@ -318,7 +572,7 @@ ENTRY(save_args)
12013 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12014 movq_cfi rbp, 8 /* push %rbp */
12015 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12016 - testl $3, CS(%rdi)
12017 + testb $3, CS(%rdi)
12018 je 1f
12019 SWAPGS
12020 /*
12021 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12022
12023 RESTORE_REST
12024
12025 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12026 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12027 je int_ret_from_sys_call
12028
12029 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12030 @@ -455,7 +709,7 @@ END(ret_from_fork)
12031 ENTRY(system_call)
12032 CFI_STARTPROC simple
12033 CFI_SIGNAL_FRAME
12034 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12035 + CFI_DEF_CFA rsp,0
12036 CFI_REGISTER rip,rcx
12037 /*CFI_REGISTER rflags,r11*/
12038 SWAPGS_UNSAFE_STACK
12039 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12040
12041 movq %rsp,PER_CPU_VAR(old_rsp)
12042 movq PER_CPU_VAR(kernel_stack),%rsp
12043 + pax_enter_kernel_user
12044 /*
12045 * No need to follow this irqs off/on section - it's straight
12046 * and short:
12047 */
12048 ENABLE_INTERRUPTS(CLBR_NONE)
12049 - SAVE_ARGS 8,1
12050 + SAVE_ARGS 8*6,1
12051 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12052 movq %rcx,RIP-ARGOFFSET(%rsp)
12053 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12054 @@ -502,6 +757,7 @@ sysret_check:
12055 andl %edi,%edx
12056 jnz sysret_careful
12057 CFI_REMEMBER_STATE
12058 + pax_exit_kernel_user
12059 /*
12060 * sysretq will re-enable interrupts:
12061 */
12062 @@ -560,6 +816,9 @@ auditsys:
12063 movq %rax,%rsi /* 2nd arg: syscall number */
12064 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12065 call audit_syscall_entry
12066 +
12067 + pax_erase_kstack
12068 +
12069 LOAD_ARGS 0 /* reload call-clobbered registers */
12070 jmp system_call_fastpath
12071
12072 @@ -590,6 +849,9 @@ tracesys:
12073 FIXUP_TOP_OF_STACK %rdi
12074 movq %rsp,%rdi
12075 call syscall_trace_enter
12076 +
12077 + pax_erase_kstack
12078 +
12079 /*
12080 * Reload arg registers from stack in case ptrace changed them.
12081 * We don't reload %rax because syscall_trace_enter() returned
12082 @@ -611,7 +873,7 @@ tracesys:
12083 GLOBAL(int_ret_from_sys_call)
12084 DISABLE_INTERRUPTS(CLBR_NONE)
12085 TRACE_IRQS_OFF
12086 - testl $3,CS-ARGOFFSET(%rsp)
12087 + testb $3,CS-ARGOFFSET(%rsp)
12088 je retint_restore_args
12089 movl $_TIF_ALLWORK_MASK,%edi
12090 /* edi: mask to check */
12091 @@ -793,6 +1055,16 @@ END(interrupt)
12092 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12093 call save_args
12094 PARTIAL_FRAME 0
12095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12096 + testb $3, CS(%rdi)
12097 + jnz 1f
12098 + pax_enter_kernel
12099 + jmp 2f
12100 +1: pax_enter_kernel_user
12101 +2:
12102 +#else
12103 + pax_enter_kernel
12104 +#endif
12105 call \func
12106 .endm
12107
12108 @@ -825,7 +1097,7 @@ ret_from_intr:
12109 CFI_ADJUST_CFA_OFFSET -8
12110 exit_intr:
12111 GET_THREAD_INFO(%rcx)
12112 - testl $3,CS-ARGOFFSET(%rsp)
12113 + testb $3,CS-ARGOFFSET(%rsp)
12114 je retint_kernel
12115
12116 /* Interrupt came from user space */
12117 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12118 * The iretq could re-enable interrupts:
12119 */
12120 DISABLE_INTERRUPTS(CLBR_ANY)
12121 + pax_exit_kernel_user
12122 TRACE_IRQS_IRETQ
12123 SWAPGS
12124 jmp restore_args
12125
12126 retint_restore_args: /* return to kernel space */
12127 DISABLE_INTERRUPTS(CLBR_ANY)
12128 + pax_exit_kernel
12129 /*
12130 * The iretq could re-enable interrupts:
12131 */
12132 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12133 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12134 call error_entry
12135 DEFAULT_FRAME 0
12136 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12137 + testb $3, CS(%rsp)
12138 + jnz 1f
12139 + pax_enter_kernel
12140 + jmp 2f
12141 +1: pax_enter_kernel_user
12142 +2:
12143 +#else
12144 + pax_enter_kernel
12145 +#endif
12146 movq %rsp,%rdi /* pt_regs pointer */
12147 xorl %esi,%esi /* no error code */
12148 call \do_sym
12149 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12150 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12151 call save_paranoid
12152 TRACE_IRQS_OFF
12153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12154 + testb $3, CS(%rsp)
12155 + jnz 1f
12156 + pax_enter_kernel
12157 + jmp 2f
12158 +1: pax_enter_kernel_user
12159 +2:
12160 +#else
12161 + pax_enter_kernel
12162 +#endif
12163 movq %rsp,%rdi /* pt_regs pointer */
12164 xorl %esi,%esi /* no error code */
12165 call \do_sym
12166 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12167 END(\sym)
12168 .endm
12169
12170 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12171 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12172 .macro paranoidzeroentry_ist sym do_sym ist
12173 ENTRY(\sym)
12174 INTR_FRAME
12175 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12176 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12177 call save_paranoid
12178 TRACE_IRQS_OFF
12179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12180 + testb $3, CS(%rsp)
12181 + jnz 1f
12182 + pax_enter_kernel
12183 + jmp 2f
12184 +1: pax_enter_kernel_user
12185 +2:
12186 +#else
12187 + pax_enter_kernel
12188 +#endif
12189 movq %rsp,%rdi /* pt_regs pointer */
12190 xorl %esi,%esi /* no error code */
12191 +#ifdef CONFIG_SMP
12192 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12193 + lea init_tss(%r12), %r12
12194 +#else
12195 + lea init_tss(%rip), %r12
12196 +#endif
12197 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12198 call \do_sym
12199 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12200 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12201 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12202 call error_entry
12203 DEFAULT_FRAME 0
12204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12205 + testb $3, CS(%rsp)
12206 + jnz 1f
12207 + pax_enter_kernel
12208 + jmp 2f
12209 +1: pax_enter_kernel_user
12210 +2:
12211 +#else
12212 + pax_enter_kernel
12213 +#endif
12214 movq %rsp,%rdi /* pt_regs pointer */
12215 movq ORIG_RAX(%rsp),%rsi /* get error code */
12216 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12217 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12218 call save_paranoid
12219 DEFAULT_FRAME 0
12220 TRACE_IRQS_OFF
12221 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12222 + testb $3, CS(%rsp)
12223 + jnz 1f
12224 + pax_enter_kernel
12225 + jmp 2f
12226 +1: pax_enter_kernel_user
12227 +2:
12228 +#else
12229 + pax_enter_kernel
12230 +#endif
12231 movq %rsp,%rdi /* pt_regs pointer */
12232 movq ORIG_RAX(%rsp),%rsi /* get error code */
12233 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12234 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12235 TRACE_IRQS_OFF
12236 testl %ebx,%ebx /* swapgs needed? */
12237 jnz paranoid_restore
12238 - testl $3,CS(%rsp)
12239 + testb $3,CS(%rsp)
12240 jnz paranoid_userspace
12241 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12242 + pax_exit_kernel
12243 + TRACE_IRQS_IRETQ 0
12244 + SWAPGS_UNSAFE_STACK
12245 + RESTORE_ALL 8
12246 + jmp irq_return
12247 +#endif
12248 paranoid_swapgs:
12249 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12250 + pax_exit_kernel_user
12251 +#else
12252 + pax_exit_kernel
12253 +#endif
12254 TRACE_IRQS_IRETQ 0
12255 SWAPGS_UNSAFE_STACK
12256 RESTORE_ALL 8
12257 jmp irq_return
12258 paranoid_restore:
12259 + pax_exit_kernel
12260 TRACE_IRQS_IRETQ 0
12261 RESTORE_ALL 8
12262 jmp irq_return
12263 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12264 movq_cfi r14, R14+8
12265 movq_cfi r15, R15+8
12266 xorl %ebx,%ebx
12267 - testl $3,CS+8(%rsp)
12268 + testb $3,CS+8(%rsp)
12269 je error_kernelspace
12270 error_swapgs:
12271 SWAPGS
12272 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12273 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12274 call save_paranoid
12275 DEFAULT_FRAME 0
12276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12277 + testb $3, CS(%rsp)
12278 + jnz 1f
12279 + pax_enter_kernel
12280 + jmp 2f
12281 +1: pax_enter_kernel_user
12282 +2:
12283 +#else
12284 + pax_enter_kernel
12285 +#endif
12286 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12287 movq %rsp,%rdi
12288 movq $-1,%rsi
12289 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12290 DISABLE_INTERRUPTS(CLBR_NONE)
12291 testl %ebx,%ebx /* swapgs needed? */
12292 jnz nmi_restore
12293 - testl $3,CS(%rsp)
12294 + testb $3,CS(%rsp)
12295 jnz nmi_userspace
12296 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12297 + pax_exit_kernel
12298 + SWAPGS_UNSAFE_STACK
12299 + RESTORE_ALL 8
12300 + jmp irq_return
12301 +#endif
12302 nmi_swapgs:
12303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12304 + pax_exit_kernel_user
12305 +#else
12306 + pax_exit_kernel
12307 +#endif
12308 SWAPGS_UNSAFE_STACK
12309 + RESTORE_ALL 8
12310 + jmp irq_return
12311 nmi_restore:
12312 + pax_exit_kernel
12313 RESTORE_ALL 8
12314 jmp irq_return
12315 nmi_userspace:
12316 diff -urNp linux-3.0.3/arch/x86/kernel/ftrace.c linux-3.0.3/arch/x86/kernel/ftrace.c
12317 --- linux-3.0.3/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12318 +++ linux-3.0.3/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12319 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12320 static const void *mod_code_newcode; /* holds the text to write to the IP */
12321
12322 static unsigned nmi_wait_count;
12323 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12324 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12325
12326 int ftrace_arch_read_dyn_info(char *buf, int size)
12327 {
12328 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12329
12330 r = snprintf(buf, size, "%u %u",
12331 nmi_wait_count,
12332 - atomic_read(&nmi_update_count));
12333 + atomic_read_unchecked(&nmi_update_count));
12334 return r;
12335 }
12336
12337 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12338
12339 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12340 smp_rmb();
12341 + pax_open_kernel();
12342 ftrace_mod_code();
12343 - atomic_inc(&nmi_update_count);
12344 + pax_close_kernel();
12345 + atomic_inc_unchecked(&nmi_update_count);
12346 }
12347 /* Must have previous changes seen before executions */
12348 smp_mb();
12349 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12350 {
12351 unsigned char replaced[MCOUNT_INSN_SIZE];
12352
12353 + ip = ktla_ktva(ip);
12354 +
12355 /*
12356 * Note: Due to modules and __init, code can
12357 * disappear and change, we need to protect against faulting
12358 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12359 unsigned char old[MCOUNT_INSN_SIZE], *new;
12360 int ret;
12361
12362 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12363 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12364 new = ftrace_call_replace(ip, (unsigned long)func);
12365 ret = ftrace_modify_code(ip, old, new);
12366
12367 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12368 {
12369 unsigned char code[MCOUNT_INSN_SIZE];
12370
12371 + ip = ktla_ktva(ip);
12372 +
12373 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12374 return -EFAULT;
12375
12376 diff -urNp linux-3.0.3/arch/x86/kernel/head32.c linux-3.0.3/arch/x86/kernel/head32.c
12377 --- linux-3.0.3/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12378 +++ linux-3.0.3/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12379 @@ -19,6 +19,7 @@
12380 #include <asm/io_apic.h>
12381 #include <asm/bios_ebda.h>
12382 #include <asm/tlbflush.h>
12383 +#include <asm/boot.h>
12384
12385 static void __init i386_default_early_setup(void)
12386 {
12387 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12388 {
12389 memblock_init();
12390
12391 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12392 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12393
12394 #ifdef CONFIG_BLK_DEV_INITRD
12395 /* Reserve INITRD */
12396 diff -urNp linux-3.0.3/arch/x86/kernel/head_32.S linux-3.0.3/arch/x86/kernel/head_32.S
12397 --- linux-3.0.3/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12398 +++ linux-3.0.3/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12399 @@ -25,6 +25,12 @@
12400 /* Physical address */
12401 #define pa(X) ((X) - __PAGE_OFFSET)
12402
12403 +#ifdef CONFIG_PAX_KERNEXEC
12404 +#define ta(X) (X)
12405 +#else
12406 +#define ta(X) ((X) - __PAGE_OFFSET)
12407 +#endif
12408 +
12409 /*
12410 * References to members of the new_cpu_data structure.
12411 */
12412 @@ -54,11 +60,7 @@
12413 * and small than max_low_pfn, otherwise will waste some page table entries
12414 */
12415
12416 -#if PTRS_PER_PMD > 1
12417 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12418 -#else
12419 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12420 -#endif
12421 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12422
12423 /* Number of possible pages in the lowmem region */
12424 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12425 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12426 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12427
12428 /*
12429 + * Real beginning of normal "text" segment
12430 + */
12431 +ENTRY(stext)
12432 +ENTRY(_stext)
12433 +
12434 +/*
12435 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12436 * %esi points to the real-mode code as a 32-bit pointer.
12437 * CS and DS must be 4 GB flat segments, but we don't depend on
12438 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12439 * can.
12440 */
12441 __HEAD
12442 +
12443 +#ifdef CONFIG_PAX_KERNEXEC
12444 + jmp startup_32
12445 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12446 +.fill PAGE_SIZE-5,1,0xcc
12447 +#endif
12448 +
12449 ENTRY(startup_32)
12450 movl pa(stack_start),%ecx
12451
12452 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12453 2:
12454 leal -__PAGE_OFFSET(%ecx),%esp
12455
12456 +#ifdef CONFIG_SMP
12457 + movl $pa(cpu_gdt_table),%edi
12458 + movl $__per_cpu_load,%eax
12459 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12460 + rorl $16,%eax
12461 + movb %al,__KERNEL_PERCPU + 4(%edi)
12462 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12463 + movl $__per_cpu_end - 1,%eax
12464 + subl $__per_cpu_start,%eax
12465 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12466 +#endif
12467 +
12468 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12469 + movl $NR_CPUS,%ecx
12470 + movl $pa(cpu_gdt_table),%edi
12471 +1:
12472 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12473 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12474 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12475 + addl $PAGE_SIZE_asm,%edi
12476 + loop 1b
12477 +#endif
12478 +
12479 +#ifdef CONFIG_PAX_KERNEXEC
12480 + movl $pa(boot_gdt),%edi
12481 + movl $__LOAD_PHYSICAL_ADDR,%eax
12482 + movw %ax,__BOOT_CS + 2(%edi)
12483 + rorl $16,%eax
12484 + movb %al,__BOOT_CS + 4(%edi)
12485 + movb %ah,__BOOT_CS + 7(%edi)
12486 + rorl $16,%eax
12487 +
12488 + ljmp $(__BOOT_CS),$1f
12489 +1:
12490 +
12491 + movl $NR_CPUS,%ecx
12492 + movl $pa(cpu_gdt_table),%edi
12493 + addl $__PAGE_OFFSET,%eax
12494 +1:
12495 + movw %ax,__KERNEL_CS + 2(%edi)
12496 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12497 + rorl $16,%eax
12498 + movb %al,__KERNEL_CS + 4(%edi)
12499 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12500 + movb %ah,__KERNEL_CS + 7(%edi)
12501 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12502 + rorl $16,%eax
12503 + addl $PAGE_SIZE_asm,%edi
12504 + loop 1b
12505 +#endif
12506 +
12507 /*
12508 * Clear BSS first so that there are no surprises...
12509 */
12510 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12511 movl %eax, pa(max_pfn_mapped)
12512
12513 /* Do early initialization of the fixmap area */
12514 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12515 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12516 +#ifdef CONFIG_COMPAT_VDSO
12517 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12518 +#else
12519 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520 +#endif
12521 #else /* Not PAE */
12522
12523 page_pde_offset = (__PAGE_OFFSET >> 20);
12524 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12525 movl %eax, pa(max_pfn_mapped)
12526
12527 /* Do early initialization of the fixmap area */
12528 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12529 - movl %eax,pa(initial_page_table+0xffc)
12530 +#ifdef CONFIG_COMPAT_VDSO
12531 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12532 +#else
12533 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12534 +#endif
12535 #endif
12536
12537 #ifdef CONFIG_PARAVIRT
12538 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12539 cmpl $num_subarch_entries, %eax
12540 jae bad_subarch
12541
12542 - movl pa(subarch_entries)(,%eax,4), %eax
12543 - subl $__PAGE_OFFSET, %eax
12544 - jmp *%eax
12545 + jmp *pa(subarch_entries)(,%eax,4)
12546
12547 bad_subarch:
12548 WEAK(lguest_entry)
12549 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12550 __INITDATA
12551
12552 subarch_entries:
12553 - .long default_entry /* normal x86/PC */
12554 - .long lguest_entry /* lguest hypervisor */
12555 - .long xen_entry /* Xen hypervisor */
12556 - .long default_entry /* Moorestown MID */
12557 + .long ta(default_entry) /* normal x86/PC */
12558 + .long ta(lguest_entry) /* lguest hypervisor */
12559 + .long ta(xen_entry) /* Xen hypervisor */
12560 + .long ta(default_entry) /* Moorestown MID */
12561 num_subarch_entries = (. - subarch_entries) / 4
12562 .previous
12563 #else
12564 @@ -312,6 +382,7 @@ default_entry:
12565 orl %edx,%eax
12566 movl %eax,%cr4
12567
12568 +#ifdef CONFIG_X86_PAE
12569 testb $X86_CR4_PAE, %al # check if PAE is enabled
12570 jz 6f
12571
12572 @@ -340,6 +411,9 @@ default_entry:
12573 /* Make changes effective */
12574 wrmsr
12575
12576 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12577 +#endif
12578 +
12579 6:
12580
12581 /*
12582 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12583 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12584 movl %eax,%ss # after changing gdt.
12585
12586 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12587 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12588 movl %eax,%ds
12589 movl %eax,%es
12590
12591 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12592 */
12593 cmpb $0,ready
12594 jne 1f
12595 - movl $gdt_page,%eax
12596 + movl $cpu_gdt_table,%eax
12597 movl $stack_canary,%ecx
12598 +#ifdef CONFIG_SMP
12599 + addl $__per_cpu_load,%ecx
12600 +#endif
12601 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12602 shrl $16, %ecx
12603 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12604 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12605 1:
12606 -#endif
12607 movl $(__KERNEL_STACK_CANARY),%eax
12608 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12609 + movl $(__USER_DS),%eax
12610 +#else
12611 + xorl %eax,%eax
12612 +#endif
12613 movl %eax,%gs
12614
12615 xorl %eax,%eax # Clear LDT
12616 @@ -558,22 +639,22 @@ early_page_fault:
12617 jmp early_fault
12618
12619 early_fault:
12620 - cld
12621 #ifdef CONFIG_PRINTK
12622 + cmpl $1,%ss:early_recursion_flag
12623 + je hlt_loop
12624 + incl %ss:early_recursion_flag
12625 + cld
12626 pusha
12627 movl $(__KERNEL_DS),%eax
12628 movl %eax,%ds
12629 movl %eax,%es
12630 - cmpl $2,early_recursion_flag
12631 - je hlt_loop
12632 - incl early_recursion_flag
12633 movl %cr2,%eax
12634 pushl %eax
12635 pushl %edx /* trapno */
12636 pushl $fault_msg
12637 call printk
12638 +; call dump_stack
12639 #endif
12640 - call dump_stack
12641 hlt_loop:
12642 hlt
12643 jmp hlt_loop
12644 @@ -581,8 +662,11 @@ hlt_loop:
12645 /* This is the default interrupt "handler" :-) */
12646 ALIGN
12647 ignore_int:
12648 - cld
12649 #ifdef CONFIG_PRINTK
12650 + cmpl $2,%ss:early_recursion_flag
12651 + je hlt_loop
12652 + incl %ss:early_recursion_flag
12653 + cld
12654 pushl %eax
12655 pushl %ecx
12656 pushl %edx
12657 @@ -591,9 +675,6 @@ ignore_int:
12658 movl $(__KERNEL_DS),%eax
12659 movl %eax,%ds
12660 movl %eax,%es
12661 - cmpl $2,early_recursion_flag
12662 - je hlt_loop
12663 - incl early_recursion_flag
12664 pushl 16(%esp)
12665 pushl 24(%esp)
12666 pushl 32(%esp)
12667 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12668 /*
12669 * BSS section
12670 */
12671 -__PAGE_ALIGNED_BSS
12672 - .align PAGE_SIZE
12673 #ifdef CONFIG_X86_PAE
12674 +.section .initial_pg_pmd,"a",@progbits
12675 initial_pg_pmd:
12676 .fill 1024*KPMDS,4,0
12677 #else
12678 +.section .initial_page_table,"a",@progbits
12679 ENTRY(initial_page_table)
12680 .fill 1024,4,0
12681 #endif
12682 +.section .initial_pg_fixmap,"a",@progbits
12683 initial_pg_fixmap:
12684 .fill 1024,4,0
12685 +.section .empty_zero_page,"a",@progbits
12686 ENTRY(empty_zero_page)
12687 .fill 4096,1,0
12688 +.section .swapper_pg_dir,"a",@progbits
12689 ENTRY(swapper_pg_dir)
12690 +#ifdef CONFIG_X86_PAE
12691 + .fill 4,8,0
12692 +#else
12693 .fill 1024,4,0
12694 +#endif
12695 +
12696 +/*
12697 + * The IDT has to be page-aligned to simplify the Pentium
12698 + * F0 0F bug workaround.. We have a special link segment
12699 + * for this.
12700 + */
12701 +.section .idt,"a",@progbits
12702 +ENTRY(idt_table)
12703 + .fill 256,8,0
12704
12705 /*
12706 * This starts the data section.
12707 */
12708 #ifdef CONFIG_X86_PAE
12709 -__PAGE_ALIGNED_DATA
12710 - /* Page-aligned for the benefit of paravirt? */
12711 - .align PAGE_SIZE
12712 +.section .initial_page_table,"a",@progbits
12713 ENTRY(initial_page_table)
12714 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12715 # if KPMDS == 3
12716 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12717 # error "Kernel PMDs should be 1, 2 or 3"
12718 # endif
12719 .align PAGE_SIZE /* needs to be page-sized too */
12720 +
12721 +#ifdef CONFIG_PAX_PER_CPU_PGD
12722 +ENTRY(cpu_pgd)
12723 + .rept NR_CPUS
12724 + .fill 4,8,0
12725 + .endr
12726 +#endif
12727 +
12728 #endif
12729
12730 .data
12731 .balign 4
12732 ENTRY(stack_start)
12733 - .long init_thread_union+THREAD_SIZE
12734 + .long init_thread_union+THREAD_SIZE-8
12735 +
12736 +ready: .byte 0
12737
12738 +.section .rodata,"a",@progbits
12739 early_recursion_flag:
12740 .long 0
12741
12742 -ready: .byte 0
12743 -
12744 int_msg:
12745 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12746
12747 @@ -707,7 +811,7 @@ fault_msg:
12748 .word 0 # 32 bit align gdt_desc.address
12749 boot_gdt_descr:
12750 .word __BOOT_DS+7
12751 - .long boot_gdt - __PAGE_OFFSET
12752 + .long pa(boot_gdt)
12753
12754 .word 0 # 32-bit align idt_desc.address
12755 idt_descr:
12756 @@ -718,7 +822,7 @@ idt_descr:
12757 .word 0 # 32 bit align gdt_desc.address
12758 ENTRY(early_gdt_descr)
12759 .word GDT_ENTRIES*8-1
12760 - .long gdt_page /* Overwritten for secondary CPUs */
12761 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12762
12763 /*
12764 * The boot_gdt must mirror the equivalent in setup.S and is
12765 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12766 .align L1_CACHE_BYTES
12767 ENTRY(boot_gdt)
12768 .fill GDT_ENTRY_BOOT_CS,8,0
12769 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12770 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12771 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12772 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12773 +
12774 + .align PAGE_SIZE_asm
12775 +ENTRY(cpu_gdt_table)
12776 + .rept NR_CPUS
12777 + .quad 0x0000000000000000 /* NULL descriptor */
12778 + .quad 0x0000000000000000 /* 0x0b reserved */
12779 + .quad 0x0000000000000000 /* 0x13 reserved */
12780 + .quad 0x0000000000000000 /* 0x1b reserved */
12781 +
12782 +#ifdef CONFIG_PAX_KERNEXEC
12783 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12784 +#else
12785 + .quad 0x0000000000000000 /* 0x20 unused */
12786 +#endif
12787 +
12788 + .quad 0x0000000000000000 /* 0x28 unused */
12789 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12790 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12791 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12792 + .quad 0x0000000000000000 /* 0x4b reserved */
12793 + .quad 0x0000000000000000 /* 0x53 reserved */
12794 + .quad 0x0000000000000000 /* 0x5b reserved */
12795 +
12796 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12797 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12798 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12799 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12800 +
12801 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12802 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12803 +
12804 + /*
12805 + * Segments used for calling PnP BIOS have byte granularity.
12806 + * The code segments and data segments have fixed 64k limits,
12807 + * the transfer segment sizes are set at run time.
12808 + */
12809 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12810 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12811 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12812 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12813 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12814 +
12815 + /*
12816 + * The APM segments have byte granularity and their bases
12817 + * are set at run time. All have 64k limits.
12818 + */
12819 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12820 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12821 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12822 +
12823 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12824 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12825 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12826 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12827 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12828 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12829 +
12830 + /* Be sure this is zeroed to avoid false validations in Xen */
12831 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12832 + .endr
12833 diff -urNp linux-3.0.3/arch/x86/kernel/head_64.S linux-3.0.3/arch/x86/kernel/head_64.S
12834 --- linux-3.0.3/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12835 +++ linux-3.0.3/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12836 @@ -19,6 +19,7 @@
12837 #include <asm/cache.h>
12838 #include <asm/processor-flags.h>
12839 #include <asm/percpu.h>
12840 +#include <asm/cpufeature.h>
12841
12842 #ifdef CONFIG_PARAVIRT
12843 #include <asm/asm-offsets.h>
12844 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12845 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12846 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12847 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12848 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12849 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12850 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12851 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12852
12853 .text
12854 __HEAD
12855 @@ -85,35 +90,22 @@ startup_64:
12856 */
12857 addq %rbp, init_level4_pgt + 0(%rip)
12858 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12859 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12860 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12861 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12862
12863 addq %rbp, level3_ident_pgt + 0(%rip)
12864 +#ifndef CONFIG_XEN
12865 + addq %rbp, level3_ident_pgt + 8(%rip)
12866 +#endif
12867
12868 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12869 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12870 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12871
12872 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12873 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12874 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12875
12876 - /* Add an Identity mapping if I am above 1G */
12877 - leaq _text(%rip), %rdi
12878 - andq $PMD_PAGE_MASK, %rdi
12879 -
12880 - movq %rdi, %rax
12881 - shrq $PUD_SHIFT, %rax
12882 - andq $(PTRS_PER_PUD - 1), %rax
12883 - jz ident_complete
12884 -
12885 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12886 - leaq level3_ident_pgt(%rip), %rbx
12887 - movq %rdx, 0(%rbx, %rax, 8)
12888 -
12889 - movq %rdi, %rax
12890 - shrq $PMD_SHIFT, %rax
12891 - andq $(PTRS_PER_PMD - 1), %rax
12892 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12893 - leaq level2_spare_pgt(%rip), %rbx
12894 - movq %rdx, 0(%rbx, %rax, 8)
12895 -ident_complete:
12896 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12897 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12898
12899 /*
12900 * Fixup the kernel text+data virtual addresses. Note that
12901 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12902 * after the boot processor executes this code.
12903 */
12904
12905 - /* Enable PAE mode and PGE */
12906 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12907 + /* Enable PAE mode and PSE/PGE */
12908 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12909 movq %rax, %cr4
12910
12911 /* Setup early boot stage 4 level pagetables. */
12912 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12913 movl $MSR_EFER, %ecx
12914 rdmsr
12915 btsl $_EFER_SCE, %eax /* Enable System Call */
12916 - btl $20,%edi /* No Execute supported? */
12917 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12918 jnc 1f
12919 btsl $_EFER_NX, %eax
12920 + leaq init_level4_pgt(%rip), %rdi
12921 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12922 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12923 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12924 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12925 1: wrmsr /* Make changes effective */
12926
12927 /* Setup cr0 */
12928 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12929 bad_address:
12930 jmp bad_address
12931
12932 - .section ".init.text","ax"
12933 + __INIT
12934 #ifdef CONFIG_EARLY_PRINTK
12935 .globl early_idt_handlers
12936 early_idt_handlers:
12937 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12938 #endif /* EARLY_PRINTK */
12939 1: hlt
12940 jmp 1b
12941 + .previous
12942
12943 #ifdef CONFIG_EARLY_PRINTK
12944 + __INITDATA
12945 early_recursion_flag:
12946 .long 0
12947 + .previous
12948
12949 + .section .rodata,"a",@progbits
12950 early_idt_msg:
12951 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12952 early_idt_ripmsg:
12953 .asciz "RIP %s\n"
12954 -#endif /* CONFIG_EARLY_PRINTK */
12955 .previous
12956 +#endif /* CONFIG_EARLY_PRINTK */
12957
12958 + .section .rodata,"a",@progbits
12959 #define NEXT_PAGE(name) \
12960 .balign PAGE_SIZE; \
12961 ENTRY(name)
12962 @@ -338,7 +340,6 @@ ENTRY(name)
12963 i = i + 1 ; \
12964 .endr
12965
12966 - .data
12967 /*
12968 * This default setting generates an ident mapping at address 0x100000
12969 * and a mapping for the kernel that precisely maps virtual address
12970 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12971 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12972 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12973 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12974 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12975 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12977 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 .org init_level4_pgt + L4_START_KERNEL*8, 0
12979 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12980 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12981
12982 +#ifdef CONFIG_PAX_PER_CPU_PGD
12983 +NEXT_PAGE(cpu_pgd)
12984 + .rept NR_CPUS
12985 + .fill 512,8,0
12986 + .endr
12987 +#endif
12988 +
12989 NEXT_PAGE(level3_ident_pgt)
12990 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12991 +#ifdef CONFIG_XEN
12992 .fill 511,8,0
12993 +#else
12994 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12995 + .fill 510,8,0
12996 +#endif
12997 +
12998 +NEXT_PAGE(level3_vmalloc_pgt)
12999 + .fill 512,8,0
13000 +
13001 +NEXT_PAGE(level3_vmemmap_pgt)
13002 + .fill L3_VMEMMAP_START,8,0
13003 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13004
13005 NEXT_PAGE(level3_kernel_pgt)
13006 .fill L3_START_KERNEL,8,0
13007 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13008 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13009 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13010
13011 +NEXT_PAGE(level2_vmemmap_pgt)
13012 + .fill 512,8,0
13013 +
13014 NEXT_PAGE(level2_fixmap_pgt)
13015 - .fill 506,8,0
13016 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13017 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13018 - .fill 5,8,0
13019 + .fill 507,8,0
13020 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13021 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13022 + .fill 4,8,0
13023
13024 -NEXT_PAGE(level1_fixmap_pgt)
13025 +NEXT_PAGE(level1_vsyscall_pgt)
13026 .fill 512,8,0
13027
13028 -NEXT_PAGE(level2_ident_pgt)
13029 - /* Since I easily can, map the first 1G.
13030 + /* Since I easily can, map the first 2G.
13031 * Don't set NX because code runs from these pages.
13032 */
13033 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13034 +NEXT_PAGE(level2_ident_pgt)
13035 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13036
13037 NEXT_PAGE(level2_kernel_pgt)
13038 /*
13039 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13040 * If you want to increase this then increase MODULES_VADDR
13041 * too.)
13042 */
13043 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13044 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13045 -
13046 -NEXT_PAGE(level2_spare_pgt)
13047 - .fill 512, 8, 0
13048 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13049
13050 #undef PMDS
13051 #undef NEXT_PAGE
13052
13053 - .data
13054 + .align PAGE_SIZE
13055 +ENTRY(cpu_gdt_table)
13056 + .rept NR_CPUS
13057 + .quad 0x0000000000000000 /* NULL descriptor */
13058 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13059 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13060 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13061 + .quad 0x00cffb000000ffff /* __USER32_CS */
13062 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13063 + .quad 0x00affb000000ffff /* __USER_CS */
13064 +
13065 +#ifdef CONFIG_PAX_KERNEXEC
13066 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13067 +#else
13068 + .quad 0x0 /* unused */
13069 +#endif
13070 +
13071 + .quad 0,0 /* TSS */
13072 + .quad 0,0 /* LDT */
13073 + .quad 0,0,0 /* three TLS descriptors */
13074 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13075 + /* asm/segment.h:GDT_ENTRIES must match this */
13076 +
13077 + /* zero the remaining page */
13078 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13079 + .endr
13080 +
13081 .align 16
13082 .globl early_gdt_descr
13083 early_gdt_descr:
13084 .word GDT_ENTRIES*8-1
13085 early_gdt_descr_base:
13086 - .quad INIT_PER_CPU_VAR(gdt_page)
13087 + .quad cpu_gdt_table
13088
13089 ENTRY(phys_base)
13090 /* This must match the first entry in level2_kernel_pgt */
13091 .quad 0x0000000000000000
13092
13093 #include "../../x86/xen/xen-head.S"
13094 -
13095 - .section .bss, "aw", @nobits
13096 +
13097 + .section .rodata,"a",@progbits
13098 .align L1_CACHE_BYTES
13099 ENTRY(idt_table)
13100 - .skip IDT_ENTRIES * 16
13101 + .fill 512,8,0
13102
13103 __PAGE_ALIGNED_BSS
13104 .align PAGE_SIZE
13105 diff -urNp linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c
13106 --- linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13107 +++ linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13108 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13109 EXPORT_SYMBOL(cmpxchg8b_emu);
13110 #endif
13111
13112 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13113 +
13114 /* Networking helper routines. */
13115 EXPORT_SYMBOL(csum_partial_copy_generic);
13116 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13117 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13118
13119 EXPORT_SYMBOL(__get_user_1);
13120 EXPORT_SYMBOL(__get_user_2);
13121 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13122
13123 EXPORT_SYMBOL(csum_partial);
13124 EXPORT_SYMBOL(empty_zero_page);
13125 +
13126 +#ifdef CONFIG_PAX_KERNEXEC
13127 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13128 +#endif
13129 diff -urNp linux-3.0.3/arch/x86/kernel/i8259.c linux-3.0.3/arch/x86/kernel/i8259.c
13130 --- linux-3.0.3/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13131 +++ linux-3.0.3/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13132 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13133 "spurious 8259A interrupt: IRQ%d.\n", irq);
13134 spurious_irq_mask |= irqmask;
13135 }
13136 - atomic_inc(&irq_err_count);
13137 + atomic_inc_unchecked(&irq_err_count);
13138 /*
13139 * Theoretically we do not have to handle this IRQ,
13140 * but in Linux this does not cause problems and is
13141 diff -urNp linux-3.0.3/arch/x86/kernel/init_task.c linux-3.0.3/arch/x86/kernel/init_task.c
13142 --- linux-3.0.3/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13143 +++ linux-3.0.3/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13144 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13145 * way process stacks are handled. This is done by having a special
13146 * "init_task" linker map entry..
13147 */
13148 -union thread_union init_thread_union __init_task_data =
13149 - { INIT_THREAD_INFO(init_task) };
13150 +union thread_union init_thread_union __init_task_data;
13151
13152 /*
13153 * Initial task structure.
13154 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13155 * section. Since TSS's are completely CPU-local, we want them
13156 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13157 */
13158 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13159 -
13160 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13161 +EXPORT_SYMBOL(init_tss);
13162 diff -urNp linux-3.0.3/arch/x86/kernel/ioport.c linux-3.0.3/arch/x86/kernel/ioport.c
13163 --- linux-3.0.3/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13164 +++ linux-3.0.3/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13165 @@ -6,6 +6,7 @@
13166 #include <linux/sched.h>
13167 #include <linux/kernel.h>
13168 #include <linux/capability.h>
13169 +#include <linux/security.h>
13170 #include <linux/errno.h>
13171 #include <linux/types.h>
13172 #include <linux/ioport.h>
13173 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13174
13175 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13176 return -EINVAL;
13177 +#ifdef CONFIG_GRKERNSEC_IO
13178 + if (turn_on && grsec_disable_privio) {
13179 + gr_handle_ioperm();
13180 + return -EPERM;
13181 + }
13182 +#endif
13183 if (turn_on && !capable(CAP_SYS_RAWIO))
13184 return -EPERM;
13185
13186 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13187 * because the ->io_bitmap_max value must match the bitmap
13188 * contents:
13189 */
13190 - tss = &per_cpu(init_tss, get_cpu());
13191 + tss = init_tss + get_cpu();
13192
13193 if (turn_on)
13194 bitmap_clear(t->io_bitmap_ptr, from, num);
13195 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13196 return -EINVAL;
13197 /* Trying to gain more privileges? */
13198 if (level > old) {
13199 +#ifdef CONFIG_GRKERNSEC_IO
13200 + if (grsec_disable_privio) {
13201 + gr_handle_iopl();
13202 + return -EPERM;
13203 + }
13204 +#endif
13205 if (!capable(CAP_SYS_RAWIO))
13206 return -EPERM;
13207 }
13208 diff -urNp linux-3.0.3/arch/x86/kernel/irq_32.c linux-3.0.3/arch/x86/kernel/irq_32.c
13209 --- linux-3.0.3/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13210 +++ linux-3.0.3/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13211 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13212 __asm__ __volatile__("andl %%esp,%0" :
13213 "=r" (sp) : "0" (THREAD_SIZE - 1));
13214
13215 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13216 + return sp < STACK_WARN;
13217 }
13218
13219 static void print_stack_overflow(void)
13220 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13221 * per-CPU IRQ handling contexts (thread information and stack)
13222 */
13223 union irq_ctx {
13224 - struct thread_info tinfo;
13225 - u32 stack[THREAD_SIZE/sizeof(u32)];
13226 + unsigned long previous_esp;
13227 + u32 stack[THREAD_SIZE/sizeof(u32)];
13228 } __attribute__((aligned(THREAD_SIZE)));
13229
13230 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13231 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13232 static inline int
13233 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13234 {
13235 - union irq_ctx *curctx, *irqctx;
13236 + union irq_ctx *irqctx;
13237 u32 *isp, arg1, arg2;
13238
13239 - curctx = (union irq_ctx *) current_thread_info();
13240 irqctx = __this_cpu_read(hardirq_ctx);
13241
13242 /*
13243 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13244 * handler) we can't do that and just have to keep using the
13245 * current stack (which is the irq stack already after all)
13246 */
13247 - if (unlikely(curctx == irqctx))
13248 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13249 return 0;
13250
13251 /* build the stack frame on the IRQ stack */
13252 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13253 - irqctx->tinfo.task = curctx->tinfo.task;
13254 - irqctx->tinfo.previous_esp = current_stack_pointer;
13255 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13256 + irqctx->previous_esp = current_stack_pointer;
13257
13258 - /*
13259 - * Copy the softirq bits in preempt_count so that the
13260 - * softirq checks work in the hardirq context.
13261 - */
13262 - irqctx->tinfo.preempt_count =
13263 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13264 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13265 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13266 + __set_fs(MAKE_MM_SEG(0));
13267 +#endif
13268
13269 if (unlikely(overflow))
13270 call_on_stack(print_stack_overflow, isp);
13271 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13272 : "0" (irq), "1" (desc), "2" (isp),
13273 "D" (desc->handle_irq)
13274 : "memory", "cc", "ecx");
13275 +
13276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13277 + __set_fs(current_thread_info()->addr_limit);
13278 +#endif
13279 +
13280 return 1;
13281 }
13282
13283 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13284 */
13285 void __cpuinit irq_ctx_init(int cpu)
13286 {
13287 - union irq_ctx *irqctx;
13288 -
13289 if (per_cpu(hardirq_ctx, cpu))
13290 return;
13291
13292 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13293 - THREAD_FLAGS,
13294 - THREAD_ORDER));
13295 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13296 - irqctx->tinfo.cpu = cpu;
13297 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13298 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13299 -
13300 - per_cpu(hardirq_ctx, cpu) = irqctx;
13301 -
13302 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13303 - THREAD_FLAGS,
13304 - THREAD_ORDER));
13305 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13306 - irqctx->tinfo.cpu = cpu;
13307 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13308 -
13309 - per_cpu(softirq_ctx, cpu) = irqctx;
13310 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13311 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13312
13313 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13314 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13315 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13316 asmlinkage void do_softirq(void)
13317 {
13318 unsigned long flags;
13319 - struct thread_info *curctx;
13320 union irq_ctx *irqctx;
13321 u32 *isp;
13322
13323 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13324 local_irq_save(flags);
13325
13326 if (local_softirq_pending()) {
13327 - curctx = current_thread_info();
13328 irqctx = __this_cpu_read(softirq_ctx);
13329 - irqctx->tinfo.task = curctx->task;
13330 - irqctx->tinfo.previous_esp = current_stack_pointer;
13331 + irqctx->previous_esp = current_stack_pointer;
13332
13333 /* build the stack frame on the softirq stack */
13334 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13335 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13336 +
13337 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13338 + __set_fs(MAKE_MM_SEG(0));
13339 +#endif
13340
13341 call_on_stack(__do_softirq, isp);
13342 +
13343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13344 + __set_fs(current_thread_info()->addr_limit);
13345 +#endif
13346 +
13347 /*
13348 * Shouldn't happen, we returned above if in_interrupt():
13349 */
13350 diff -urNp linux-3.0.3/arch/x86/kernel/irq.c linux-3.0.3/arch/x86/kernel/irq.c
13351 --- linux-3.0.3/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13352 +++ linux-3.0.3/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13353 @@ -17,7 +17,7 @@
13354 #include <asm/mce.h>
13355 #include <asm/hw_irq.h>
13356
13357 -atomic_t irq_err_count;
13358 +atomic_unchecked_t irq_err_count;
13359
13360 /* Function pointer for generic interrupt vector handling */
13361 void (*x86_platform_ipi_callback)(void) = NULL;
13362 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13363 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13364 seq_printf(p, " Machine check polls\n");
13365 #endif
13366 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13367 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13368 #if defined(CONFIG_X86_IO_APIC)
13369 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13370 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13371 #endif
13372 return 0;
13373 }
13374 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13375
13376 u64 arch_irq_stat(void)
13377 {
13378 - u64 sum = atomic_read(&irq_err_count);
13379 + u64 sum = atomic_read_unchecked(&irq_err_count);
13380
13381 #ifdef CONFIG_X86_IO_APIC
13382 - sum += atomic_read(&irq_mis_count);
13383 + sum += atomic_read_unchecked(&irq_mis_count);
13384 #endif
13385 return sum;
13386 }
13387 diff -urNp linux-3.0.3/arch/x86/kernel/kgdb.c linux-3.0.3/arch/x86/kernel/kgdb.c
13388 --- linux-3.0.3/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13389 +++ linux-3.0.3/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13390 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13391 #ifdef CONFIG_X86_32
13392 switch (regno) {
13393 case GDB_SS:
13394 - if (!user_mode_vm(regs))
13395 + if (!user_mode(regs))
13396 *(unsigned long *)mem = __KERNEL_DS;
13397 break;
13398 case GDB_SP:
13399 - if (!user_mode_vm(regs))
13400 + if (!user_mode(regs))
13401 *(unsigned long *)mem = kernel_stack_pointer(regs);
13402 break;
13403 case GDB_GS:
13404 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13405 case 'k':
13406 /* clear the trace bit */
13407 linux_regs->flags &= ~X86_EFLAGS_TF;
13408 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13409 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13410
13411 /* set the trace bit if we're stepping */
13412 if (remcomInBuffer[0] == 's') {
13413 linux_regs->flags |= X86_EFLAGS_TF;
13414 - atomic_set(&kgdb_cpu_doing_single_step,
13415 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13416 raw_smp_processor_id());
13417 }
13418
13419 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13420 return NOTIFY_DONE;
13421
13422 case DIE_DEBUG:
13423 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13424 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13425 if (user_mode(regs))
13426 return single_step_cont(regs, args);
13427 break;
13428 diff -urNp linux-3.0.3/arch/x86/kernel/kprobes.c linux-3.0.3/arch/x86/kernel/kprobes.c
13429 --- linux-3.0.3/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13430 +++ linux-3.0.3/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13431 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13432 } __attribute__((packed)) *insn;
13433
13434 insn = (struct __arch_relative_insn *)from;
13435 +
13436 + pax_open_kernel();
13437 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13438 insn->op = op;
13439 + pax_close_kernel();
13440 }
13441
13442 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13443 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13444 kprobe_opcode_t opcode;
13445 kprobe_opcode_t *orig_opcodes = opcodes;
13446
13447 - if (search_exception_tables((unsigned long)opcodes))
13448 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13449 return 0; /* Page fault may occur on this address. */
13450
13451 retry:
13452 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13453 }
13454 }
13455 insn_get_length(&insn);
13456 + pax_open_kernel();
13457 memcpy(dest, insn.kaddr, insn.length);
13458 + pax_close_kernel();
13459
13460 #ifdef CONFIG_X86_64
13461 if (insn_rip_relative(&insn)) {
13462 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13463 (u8 *) dest;
13464 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13465 disp = (u8 *) dest + insn_offset_displacement(&insn);
13466 + pax_open_kernel();
13467 *(s32 *) disp = (s32) newdisp;
13468 + pax_close_kernel();
13469 }
13470 #endif
13471 return insn.length;
13472 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13473 */
13474 __copy_instruction(p->ainsn.insn, p->addr, 0);
13475
13476 - if (can_boost(p->addr))
13477 + if (can_boost(ktla_ktva(p->addr)))
13478 p->ainsn.boostable = 0;
13479 else
13480 p->ainsn.boostable = -1;
13481
13482 - p->opcode = *p->addr;
13483 + p->opcode = *(ktla_ktva(p->addr));
13484 }
13485
13486 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13487 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13488 * nor set current_kprobe, because it doesn't use single
13489 * stepping.
13490 */
13491 - regs->ip = (unsigned long)p->ainsn.insn;
13492 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13493 preempt_enable_no_resched();
13494 return;
13495 }
13496 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13497 if (p->opcode == BREAKPOINT_INSTRUCTION)
13498 regs->ip = (unsigned long)p->addr;
13499 else
13500 - regs->ip = (unsigned long)p->ainsn.insn;
13501 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13502 }
13503
13504 /*
13505 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13506 setup_singlestep(p, regs, kcb, 0);
13507 return 1;
13508 }
13509 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13510 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13511 /*
13512 * The breakpoint instruction was removed right
13513 * after we hit it. Another cpu has removed
13514 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13515 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13516 {
13517 unsigned long *tos = stack_addr(regs);
13518 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13519 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 unsigned long orig_ip = (unsigned long)p->addr;
13521 kprobe_opcode_t *insn = p->ainsn.insn;
13522
13523 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13524 struct die_args *args = data;
13525 int ret = NOTIFY_DONE;
13526
13527 - if (args->regs && user_mode_vm(args->regs))
13528 + if (args->regs && user_mode(args->regs))
13529 return ret;
13530
13531 switch (val) {
13532 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13533 * Verify if the address gap is in 2GB range, because this uses
13534 * a relative jump.
13535 */
13536 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13537 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13538 if (abs(rel) > 0x7fffffff)
13539 return -ERANGE;
13540
13541 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13542 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13543
13544 /* Set probe function call */
13545 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13546 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13547
13548 /* Set returning jmp instruction at the tail of out-of-line buffer */
13549 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13550 - (u8 *)op->kp.addr + op->optinsn.size);
13551 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13552
13553 flush_icache_range((unsigned long) buf,
13554 (unsigned long) buf + TMPL_END_IDX +
13555 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13556 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13557
13558 /* Backup instructions which will be replaced by jump address */
13559 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13560 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13561 RELATIVE_ADDR_SIZE);
13562
13563 insn_buf[0] = RELATIVEJUMP_OPCODE;
13564 diff -urNp linux-3.0.3/arch/x86/kernel/ldt.c linux-3.0.3/arch/x86/kernel/ldt.c
13565 --- linux-3.0.3/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13566 +++ linux-3.0.3/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13567 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13568 if (reload) {
13569 #ifdef CONFIG_SMP
13570 preempt_disable();
13571 - load_LDT(pc);
13572 + load_LDT_nolock(pc);
13573 if (!cpumask_equal(mm_cpumask(current->mm),
13574 cpumask_of(smp_processor_id())))
13575 smp_call_function(flush_ldt, current->mm, 1);
13576 preempt_enable();
13577 #else
13578 - load_LDT(pc);
13579 + load_LDT_nolock(pc);
13580 #endif
13581 }
13582 if (oldsize) {
13583 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13584 return err;
13585
13586 for (i = 0; i < old->size; i++)
13587 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13588 + write_ldt_entry(new->ldt, i, old->ldt + i);
13589 return 0;
13590 }
13591
13592 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13593 retval = copy_ldt(&mm->context, &old_mm->context);
13594 mutex_unlock(&old_mm->context.lock);
13595 }
13596 +
13597 + if (tsk == current) {
13598 + mm->context.vdso = 0;
13599 +
13600 +#ifdef CONFIG_X86_32
13601 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13602 + mm->context.user_cs_base = 0UL;
13603 + mm->context.user_cs_limit = ~0UL;
13604 +
13605 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13606 + cpus_clear(mm->context.cpu_user_cs_mask);
13607 +#endif
13608 +
13609 +#endif
13610 +#endif
13611 +
13612 + }
13613 +
13614 return retval;
13615 }
13616
13617 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13618 }
13619 }
13620
13621 +#ifdef CONFIG_PAX_SEGMEXEC
13622 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13623 + error = -EINVAL;
13624 + goto out_unlock;
13625 + }
13626 +#endif
13627 +
13628 fill_ldt(&ldt, &ldt_info);
13629 if (oldmode)
13630 ldt.avl = 0;
13631 diff -urNp linux-3.0.3/arch/x86/kernel/machine_kexec_32.c linux-3.0.3/arch/x86/kernel/machine_kexec_32.c
13632 --- linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13633 +++ linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13634 @@ -27,7 +27,7 @@
13635 #include <asm/cacheflush.h>
13636 #include <asm/debugreg.h>
13637
13638 -static void set_idt(void *newidt, __u16 limit)
13639 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13640 {
13641 struct desc_ptr curidt;
13642
13643 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13644 }
13645
13646
13647 -static void set_gdt(void *newgdt, __u16 limit)
13648 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13649 {
13650 struct desc_ptr curgdt;
13651
13652 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13653 }
13654
13655 control_page = page_address(image->control_code_page);
13656 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13657 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13658
13659 relocate_kernel_ptr = control_page;
13660 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13661 diff -urNp linux-3.0.3/arch/x86/kernel/microcode_intel.c linux-3.0.3/arch/x86/kernel/microcode_intel.c
13662 --- linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13663 +++ linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13664 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13665
13666 static int get_ucode_user(void *to, const void *from, size_t n)
13667 {
13668 - return copy_from_user(to, from, n);
13669 + return copy_from_user(to, (__force const void __user *)from, n);
13670 }
13671
13672 static enum ucode_state
13673 request_microcode_user(int cpu, const void __user *buf, size_t size)
13674 {
13675 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13676 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13677 }
13678
13679 static void microcode_fini_cpu(int cpu)
13680 diff -urNp linux-3.0.3/arch/x86/kernel/module.c linux-3.0.3/arch/x86/kernel/module.c
13681 --- linux-3.0.3/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13682 +++ linux-3.0.3/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13683 @@ -36,21 +36,66 @@
13684 #define DEBUGP(fmt...)
13685 #endif
13686
13687 -void *module_alloc(unsigned long size)
13688 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13689 {
13690 if (PAGE_ALIGN(size) > MODULES_LEN)
13691 return NULL;
13692 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13693 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13694 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13695 -1, __builtin_return_address(0));
13696 }
13697
13698 +void *module_alloc(unsigned long size)
13699 +{
13700 +
13701 +#ifdef CONFIG_PAX_KERNEXEC
13702 + return __module_alloc(size, PAGE_KERNEL);
13703 +#else
13704 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13705 +#endif
13706 +
13707 +}
13708 +
13709 /* Free memory returned from module_alloc */
13710 void module_free(struct module *mod, void *module_region)
13711 {
13712 vfree(module_region);
13713 }
13714
13715 +#ifdef CONFIG_PAX_KERNEXEC
13716 +#ifdef CONFIG_X86_32
13717 +void *module_alloc_exec(unsigned long size)
13718 +{
13719 + struct vm_struct *area;
13720 +
13721 + if (size == 0)
13722 + return NULL;
13723 +
13724 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13725 + return area ? area->addr : NULL;
13726 +}
13727 +EXPORT_SYMBOL(module_alloc_exec);
13728 +
13729 +void module_free_exec(struct module *mod, void *module_region)
13730 +{
13731 + vunmap(module_region);
13732 +}
13733 +EXPORT_SYMBOL(module_free_exec);
13734 +#else
13735 +void module_free_exec(struct module *mod, void *module_region)
13736 +{
13737 + module_free(mod, module_region);
13738 +}
13739 +EXPORT_SYMBOL(module_free_exec);
13740 +
13741 +void *module_alloc_exec(unsigned long size)
13742 +{
13743 + return __module_alloc(size, PAGE_KERNEL_RX);
13744 +}
13745 +EXPORT_SYMBOL(module_alloc_exec);
13746 +#endif
13747 +#endif
13748 +
13749 /* We don't need anything special. */
13750 int module_frob_arch_sections(Elf_Ehdr *hdr,
13751 Elf_Shdr *sechdrs,
13752 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13753 unsigned int i;
13754 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13755 Elf32_Sym *sym;
13756 - uint32_t *location;
13757 + uint32_t *plocation, location;
13758
13759 DEBUGP("Applying relocate section %u to %u\n", relsec,
13760 sechdrs[relsec].sh_info);
13761 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13762 /* This is where to make the change */
13763 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13764 - + rel[i].r_offset;
13765 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13766 + location = (uint32_t)plocation;
13767 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13768 + plocation = ktla_ktva((void *)plocation);
13769 /* This is the symbol it is referring to. Note that all
13770 undefined symbols have been resolved. */
13771 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13772 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13773 switch (ELF32_R_TYPE(rel[i].r_info)) {
13774 case R_386_32:
13775 /* We add the value into the location given */
13776 - *location += sym->st_value;
13777 + pax_open_kernel();
13778 + *plocation += sym->st_value;
13779 + pax_close_kernel();
13780 break;
13781 case R_386_PC32:
13782 /* Add the value, subtract its postition */
13783 - *location += sym->st_value - (uint32_t)location;
13784 + pax_open_kernel();
13785 + *plocation += sym->st_value - location;
13786 + pax_close_kernel();
13787 break;
13788 default:
13789 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13790 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13791 case R_X86_64_NONE:
13792 break;
13793 case R_X86_64_64:
13794 + pax_open_kernel();
13795 *(u64 *)loc = val;
13796 + pax_close_kernel();
13797 break;
13798 case R_X86_64_32:
13799 + pax_open_kernel();
13800 *(u32 *)loc = val;
13801 + pax_close_kernel();
13802 if (val != *(u32 *)loc)
13803 goto overflow;
13804 break;
13805 case R_X86_64_32S:
13806 + pax_open_kernel();
13807 *(s32 *)loc = val;
13808 + pax_close_kernel();
13809 if ((s64)val != *(s32 *)loc)
13810 goto overflow;
13811 break;
13812 case R_X86_64_PC32:
13813 val -= (u64)loc;
13814 + pax_open_kernel();
13815 *(u32 *)loc = val;
13816 + pax_close_kernel();
13817 +
13818 #if 0
13819 if ((s64)val != *(s32 *)loc)
13820 goto overflow;
13821 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt.c linux-3.0.3/arch/x86/kernel/paravirt.c
13822 --- linux-3.0.3/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13823 +++ linux-3.0.3/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13824 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13825 {
13826 return x;
13827 }
13828 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13829 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13830 +#endif
13831
13832 void __init default_banner(void)
13833 {
13834 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13835 * corresponding structure. */
13836 static void *get_call_destination(u8 type)
13837 {
13838 - struct paravirt_patch_template tmpl = {
13839 + const struct paravirt_patch_template tmpl = {
13840 .pv_init_ops = pv_init_ops,
13841 .pv_time_ops = pv_time_ops,
13842 .pv_cpu_ops = pv_cpu_ops,
13843 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13844 .pv_lock_ops = pv_lock_ops,
13845 #endif
13846 };
13847 +
13848 + pax_track_stack();
13849 +
13850 return *((void **)&tmpl + type);
13851 }
13852
13853 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13854 if (opfunc == NULL)
13855 /* If there's no function, patch it with a ud2a (BUG) */
13856 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13857 - else if (opfunc == _paravirt_nop)
13858 + else if (opfunc == (void *)_paravirt_nop)
13859 /* If the operation is a nop, then nop the callsite */
13860 ret = paravirt_patch_nop();
13861
13862 /* identity functions just return their single argument */
13863 - else if (opfunc == _paravirt_ident_32)
13864 + else if (opfunc == (void *)_paravirt_ident_32)
13865 ret = paravirt_patch_ident_32(insnbuf, len);
13866 - else if (opfunc == _paravirt_ident_64)
13867 + else if (opfunc == (void *)_paravirt_ident_64)
13868 ret = paravirt_patch_ident_64(insnbuf, len);
13869 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13870 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13871 + ret = paravirt_patch_ident_64(insnbuf, len);
13872 +#endif
13873
13874 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13875 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13876 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13877 if (insn_len > len || start == NULL)
13878 insn_len = len;
13879 else
13880 - memcpy(insnbuf, start, insn_len);
13881 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13882
13883 return insn_len;
13884 }
13885 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13886 preempt_enable();
13887 }
13888
13889 -struct pv_info pv_info = {
13890 +struct pv_info pv_info __read_only = {
13891 .name = "bare hardware",
13892 .paravirt_enabled = 0,
13893 .kernel_rpl = 0,
13894 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13895 };
13896
13897 -struct pv_init_ops pv_init_ops = {
13898 +struct pv_init_ops pv_init_ops __read_only = {
13899 .patch = native_patch,
13900 };
13901
13902 -struct pv_time_ops pv_time_ops = {
13903 +struct pv_time_ops pv_time_ops __read_only = {
13904 .sched_clock = native_sched_clock,
13905 };
13906
13907 -struct pv_irq_ops pv_irq_ops = {
13908 +struct pv_irq_ops pv_irq_ops __read_only = {
13909 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13910 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13911 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13912 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13913 #endif
13914 };
13915
13916 -struct pv_cpu_ops pv_cpu_ops = {
13917 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13918 .cpuid = native_cpuid,
13919 .get_debugreg = native_get_debugreg,
13920 .set_debugreg = native_set_debugreg,
13921 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13922 .end_context_switch = paravirt_nop,
13923 };
13924
13925 -struct pv_apic_ops pv_apic_ops = {
13926 +struct pv_apic_ops pv_apic_ops __read_only = {
13927 #ifdef CONFIG_X86_LOCAL_APIC
13928 .startup_ipi_hook = paravirt_nop,
13929 #endif
13930 };
13931
13932 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13933 +#ifdef CONFIG_X86_32
13934 +#ifdef CONFIG_X86_PAE
13935 +/* 64-bit pagetable entries */
13936 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13937 +#else
13938 /* 32-bit pagetable entries */
13939 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13940 +#endif
13941 #else
13942 /* 64-bit pagetable entries */
13943 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13944 #endif
13945
13946 -struct pv_mmu_ops pv_mmu_ops = {
13947 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13948
13949 .read_cr2 = native_read_cr2,
13950 .write_cr2 = native_write_cr2,
13951 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13952 .make_pud = PTE_IDENT,
13953
13954 .set_pgd = native_set_pgd,
13955 + .set_pgd_batched = native_set_pgd_batched,
13956 #endif
13957 #endif /* PAGETABLE_LEVELS >= 3 */
13958
13959 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13960 },
13961
13962 .set_fixmap = native_set_fixmap,
13963 +
13964 +#ifdef CONFIG_PAX_KERNEXEC
13965 + .pax_open_kernel = native_pax_open_kernel,
13966 + .pax_close_kernel = native_pax_close_kernel,
13967 +#endif
13968 +
13969 };
13970
13971 EXPORT_SYMBOL_GPL(pv_time_ops);
13972 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c
13973 --- linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13974 +++ linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13975 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13976 arch_spin_lock(lock);
13977 }
13978
13979 -struct pv_lock_ops pv_lock_ops = {
13980 +struct pv_lock_ops pv_lock_ops __read_only = {
13981 #ifdef CONFIG_SMP
13982 .spin_is_locked = __ticket_spin_is_locked,
13983 .spin_is_contended = __ticket_spin_is_contended,
13984 diff -urNp linux-3.0.3/arch/x86/kernel/pci-iommu_table.c linux-3.0.3/arch/x86/kernel/pci-iommu_table.c
13985 --- linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
13986 +++ linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
13987 @@ -2,7 +2,7 @@
13988 #include <asm/iommu_table.h>
13989 #include <linux/string.h>
13990 #include <linux/kallsyms.h>
13991 -
13992 +#include <linux/sched.h>
13993
13994 #define DEBUG 1
13995
13996 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
13997 {
13998 struct iommu_table_entry *p, *q, *x;
13999
14000 + pax_track_stack();
14001 +
14002 /* Simple cyclic dependency checker. */
14003 for (p = start; p < finish; p++) {
14004 q = find_dependents_of(start, finish, p);
14005 diff -urNp linux-3.0.3/arch/x86/kernel/process_32.c linux-3.0.3/arch/x86/kernel/process_32.c
14006 --- linux-3.0.3/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14007 +++ linux-3.0.3/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14008 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14009 unsigned long thread_saved_pc(struct task_struct *tsk)
14010 {
14011 return ((unsigned long *)tsk->thread.sp)[3];
14012 +//XXX return tsk->thread.eip;
14013 }
14014
14015 #ifndef CONFIG_SMP
14016 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14017 unsigned long sp;
14018 unsigned short ss, gs;
14019
14020 - if (user_mode_vm(regs)) {
14021 + if (user_mode(regs)) {
14022 sp = regs->sp;
14023 ss = regs->ss & 0xffff;
14024 - gs = get_user_gs(regs);
14025 } else {
14026 sp = kernel_stack_pointer(regs);
14027 savesegment(ss, ss);
14028 - savesegment(gs, gs);
14029 }
14030 + gs = get_user_gs(regs);
14031
14032 show_regs_common();
14033
14034 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14035 struct task_struct *tsk;
14036 int err;
14037
14038 - childregs = task_pt_regs(p);
14039 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14040 *childregs = *regs;
14041 childregs->ax = 0;
14042 childregs->sp = sp;
14043
14044 p->thread.sp = (unsigned long) childregs;
14045 p->thread.sp0 = (unsigned long) (childregs+1);
14046 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14047
14048 p->thread.ip = (unsigned long) ret_from_fork;
14049
14050 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14051 struct thread_struct *prev = &prev_p->thread,
14052 *next = &next_p->thread;
14053 int cpu = smp_processor_id();
14054 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14055 + struct tss_struct *tss = init_tss + cpu;
14056 bool preload_fpu;
14057
14058 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14059 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14060 */
14061 lazy_save_gs(prev->gs);
14062
14063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14064 + __set_fs(task_thread_info(next_p)->addr_limit);
14065 +#endif
14066 +
14067 /*
14068 * Load the per-thread Thread-Local Storage descriptor.
14069 */
14070 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14071 */
14072 arch_end_context_switch(next_p);
14073
14074 + percpu_write(current_task, next_p);
14075 + percpu_write(current_tinfo, &next_p->tinfo);
14076 +
14077 if (preload_fpu)
14078 __math_state_restore();
14079
14080 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14081 if (prev->gs | next->gs)
14082 lazy_load_gs(next->gs);
14083
14084 - percpu_write(current_task, next_p);
14085 -
14086 return prev_p;
14087 }
14088
14089 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14090 } while (count++ < 16);
14091 return 0;
14092 }
14093 -
14094 diff -urNp linux-3.0.3/arch/x86/kernel/process_64.c linux-3.0.3/arch/x86/kernel/process_64.c
14095 --- linux-3.0.3/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14096 +++ linux-3.0.3/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14097 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14098 void exit_idle(void)
14099 {
14100 /* idle loop has pid 0 */
14101 - if (current->pid)
14102 + if (task_pid_nr(current))
14103 return;
14104 __exit_idle();
14105 }
14106 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14107 struct pt_regs *childregs;
14108 struct task_struct *me = current;
14109
14110 - childregs = ((struct pt_regs *)
14111 - (THREAD_SIZE + task_stack_page(p))) - 1;
14112 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14113 *childregs = *regs;
14114
14115 childregs->ax = 0;
14116 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14117 p->thread.sp = (unsigned long) childregs;
14118 p->thread.sp0 = (unsigned long) (childregs+1);
14119 p->thread.usersp = me->thread.usersp;
14120 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14121
14122 set_tsk_thread_flag(p, TIF_FORK);
14123
14124 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14125 struct thread_struct *prev = &prev_p->thread;
14126 struct thread_struct *next = &next_p->thread;
14127 int cpu = smp_processor_id();
14128 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14129 + struct tss_struct *tss = init_tss + cpu;
14130 unsigned fsindex, gsindex;
14131 bool preload_fpu;
14132
14133 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14134 prev->usersp = percpu_read(old_rsp);
14135 percpu_write(old_rsp, next->usersp);
14136 percpu_write(current_task, next_p);
14137 + percpu_write(current_tinfo, &next_p->tinfo);
14138
14139 - percpu_write(kernel_stack,
14140 - (unsigned long)task_stack_page(next_p) +
14141 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14142 + percpu_write(kernel_stack, next->sp0);
14143
14144 /*
14145 * Now maybe reload the debug registers and handle I/O bitmaps
14146 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14147 if (!p || p == current || p->state == TASK_RUNNING)
14148 return 0;
14149 stack = (unsigned long)task_stack_page(p);
14150 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14151 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14152 return 0;
14153 fp = *(u64 *)(p->thread.sp);
14154 do {
14155 - if (fp < (unsigned long)stack ||
14156 - fp >= (unsigned long)stack+THREAD_SIZE)
14157 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14158 return 0;
14159 ip = *(u64 *)(fp+8);
14160 if (!in_sched_functions(ip))
14161 diff -urNp linux-3.0.3/arch/x86/kernel/process.c linux-3.0.3/arch/x86/kernel/process.c
14162 --- linux-3.0.3/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14163 +++ linux-3.0.3/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14164 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14165
14166 void free_thread_info(struct thread_info *ti)
14167 {
14168 - free_thread_xstate(ti->task);
14169 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14170 }
14171
14172 +static struct kmem_cache *task_struct_cachep;
14173 +
14174 void arch_task_cache_init(void)
14175 {
14176 - task_xstate_cachep =
14177 - kmem_cache_create("task_xstate", xstate_size,
14178 + /* create a slab on which task_structs can be allocated */
14179 + task_struct_cachep =
14180 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14181 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14182 +
14183 + task_xstate_cachep =
14184 + kmem_cache_create("task_xstate", xstate_size,
14185 __alignof__(union thread_xstate),
14186 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14187 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14188 +}
14189 +
14190 +struct task_struct *alloc_task_struct_node(int node)
14191 +{
14192 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14193 +}
14194 +
14195 +void free_task_struct(struct task_struct *task)
14196 +{
14197 + free_thread_xstate(task);
14198 + kmem_cache_free(task_struct_cachep, task);
14199 }
14200
14201 /*
14202 @@ -70,7 +87,7 @@ void exit_thread(void)
14203 unsigned long *bp = t->io_bitmap_ptr;
14204
14205 if (bp) {
14206 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14207 + struct tss_struct *tss = init_tss + get_cpu();
14208
14209 t->io_bitmap_ptr = NULL;
14210 clear_thread_flag(TIF_IO_BITMAP);
14211 @@ -106,7 +123,7 @@ void show_regs_common(void)
14212
14213 printk(KERN_CONT "\n");
14214 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14215 - current->pid, current->comm, print_tainted(),
14216 + task_pid_nr(current), current->comm, print_tainted(),
14217 init_utsname()->release,
14218 (int)strcspn(init_utsname()->version, " "),
14219 init_utsname()->version);
14220 @@ -120,6 +137,9 @@ void flush_thread(void)
14221 {
14222 struct task_struct *tsk = current;
14223
14224 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14225 + loadsegment(gs, 0);
14226 +#endif
14227 flush_ptrace_hw_breakpoint(tsk);
14228 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14229 /*
14230 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14231 regs.di = (unsigned long) arg;
14232
14233 #ifdef CONFIG_X86_32
14234 - regs.ds = __USER_DS;
14235 - regs.es = __USER_DS;
14236 + regs.ds = __KERNEL_DS;
14237 + regs.es = __KERNEL_DS;
14238 regs.fs = __KERNEL_PERCPU;
14239 - regs.gs = __KERNEL_STACK_CANARY;
14240 + savesegment(gs, regs.gs);
14241 #else
14242 regs.ss = __KERNEL_DS;
14243 #endif
14244 @@ -403,7 +423,7 @@ void default_idle(void)
14245 EXPORT_SYMBOL(default_idle);
14246 #endif
14247
14248 -void stop_this_cpu(void *dummy)
14249 +__noreturn void stop_this_cpu(void *dummy)
14250 {
14251 local_irq_disable();
14252 /*
14253 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14254 }
14255 early_param("idle", idle_setup);
14256
14257 -unsigned long arch_align_stack(unsigned long sp)
14258 +#ifdef CONFIG_PAX_RANDKSTACK
14259 +asmlinkage void pax_randomize_kstack(void)
14260 {
14261 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14262 - sp -= get_random_int() % 8192;
14263 - return sp & ~0xf;
14264 -}
14265 + struct thread_struct *thread = &current->thread;
14266 + unsigned long time;
14267
14268 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14269 -{
14270 - unsigned long range_end = mm->brk + 0x02000000;
14271 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14272 -}
14273 + if (!randomize_va_space)
14274 + return;
14275 +
14276 + rdtscl(time);
14277 +
14278 + /* P4 seems to return a 0 LSB, ignore it */
14279 +#ifdef CONFIG_MPENTIUM4
14280 + time &= 0x3EUL;
14281 + time <<= 2;
14282 +#elif defined(CONFIG_X86_64)
14283 + time &= 0xFUL;
14284 + time <<= 4;
14285 +#else
14286 + time &= 0x1FUL;
14287 + time <<= 3;
14288 +#endif
14289 +
14290 + thread->sp0 ^= time;
14291 + load_sp0(init_tss + smp_processor_id(), thread);
14292
14293 +#ifdef CONFIG_X86_64
14294 + percpu_write(kernel_stack, thread->sp0);
14295 +#endif
14296 +}
14297 +#endif
14298 diff -urNp linux-3.0.3/arch/x86/kernel/ptrace.c linux-3.0.3/arch/x86/kernel/ptrace.c
14299 --- linux-3.0.3/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14300 +++ linux-3.0.3/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14301 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14302 unsigned long addr, unsigned long data)
14303 {
14304 int ret;
14305 - unsigned long __user *datap = (unsigned long __user *)data;
14306 + unsigned long __user *datap = (__force unsigned long __user *)data;
14307
14308 switch (request) {
14309 /* read the word at location addr in the USER area. */
14310 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14311 if ((int) addr < 0)
14312 return -EIO;
14313 ret = do_get_thread_area(child, addr,
14314 - (struct user_desc __user *)data);
14315 + (__force struct user_desc __user *) data);
14316 break;
14317
14318 case PTRACE_SET_THREAD_AREA:
14319 if ((int) addr < 0)
14320 return -EIO;
14321 ret = do_set_thread_area(child, addr,
14322 - (struct user_desc __user *)data, 0);
14323 + (__force struct user_desc __user *) data, 0);
14324 break;
14325 #endif
14326
14327 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14328 memset(info, 0, sizeof(*info));
14329 info->si_signo = SIGTRAP;
14330 info->si_code = si_code;
14331 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14332 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14333 }
14334
14335 void user_single_step_siginfo(struct task_struct *tsk,
14336 diff -urNp linux-3.0.3/arch/x86/kernel/pvclock.c linux-3.0.3/arch/x86/kernel/pvclock.c
14337 --- linux-3.0.3/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14338 +++ linux-3.0.3/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14339 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14340 return pv_tsc_khz;
14341 }
14342
14343 -static atomic64_t last_value = ATOMIC64_INIT(0);
14344 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14345
14346 void pvclock_resume(void)
14347 {
14348 - atomic64_set(&last_value, 0);
14349 + atomic64_set_unchecked(&last_value, 0);
14350 }
14351
14352 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14353 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14354 * updating at the same time, and one of them could be slightly behind,
14355 * making the assumption that last_value always go forward fail to hold.
14356 */
14357 - last = atomic64_read(&last_value);
14358 + last = atomic64_read_unchecked(&last_value);
14359 do {
14360 if (ret < last)
14361 return last;
14362 - last = atomic64_cmpxchg(&last_value, last, ret);
14363 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14364 } while (unlikely(last != ret));
14365
14366 return ret;
14367 diff -urNp linux-3.0.3/arch/x86/kernel/reboot.c linux-3.0.3/arch/x86/kernel/reboot.c
14368 --- linux-3.0.3/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14369 +++ linux-3.0.3/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14370 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14371 EXPORT_SYMBOL(pm_power_off);
14372
14373 static const struct desc_ptr no_idt = {};
14374 -static int reboot_mode;
14375 +static unsigned short reboot_mode;
14376 enum reboot_type reboot_type = BOOT_ACPI;
14377 int reboot_force;
14378
14379 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14380 extern const unsigned char machine_real_restart_asm[];
14381 extern const u64 machine_real_restart_gdt[3];
14382
14383 -void machine_real_restart(unsigned int type)
14384 +__noreturn void machine_real_restart(unsigned int type)
14385 {
14386 void *restart_va;
14387 unsigned long restart_pa;
14388 - void (*restart_lowmem)(unsigned int);
14389 + void (* __noreturn restart_lowmem)(unsigned int);
14390 u64 *lowmem_gdt;
14391
14392 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14393 + struct desc_struct *gdt;
14394 +#endif
14395 +
14396 local_irq_disable();
14397
14398 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14399 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14400 boot)". This seems like a fairly standard thing that gets set by
14401 REBOOT.COM programs, and the previous reset routine did this
14402 too. */
14403 - *((unsigned short *)0x472) = reboot_mode;
14404 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14405
14406 /* Patch the GDT in the low memory trampoline */
14407 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14408
14409 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14410 restart_pa = virt_to_phys(restart_va);
14411 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14412 + restart_lowmem = (void *)restart_pa;
14413
14414 /* GDT[0]: GDT self-pointer */
14415 lowmem_gdt[0] =
14416 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14417 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14418
14419 /* Jump to the identity-mapped low memory code */
14420 +
14421 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14422 + gdt = get_cpu_gdt_table(smp_processor_id());
14423 + pax_open_kernel();
14424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14425 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14426 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14427 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14428 +#endif
14429 +#ifdef CONFIG_PAX_KERNEXEC
14430 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14431 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14432 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14433 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14434 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14435 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14436 +#endif
14437 + pax_close_kernel();
14438 +#endif
14439 +
14440 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14441 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14442 + unreachable();
14443 +#else
14444 restart_lowmem(type);
14445 +#endif
14446 +
14447 }
14448 #ifdef CONFIG_APM_MODULE
14449 EXPORT_SYMBOL(machine_real_restart);
14450 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14451 * try to force a triple fault and then cycle between hitting the keyboard
14452 * controller and doing that
14453 */
14454 -static void native_machine_emergency_restart(void)
14455 +__noreturn static void native_machine_emergency_restart(void)
14456 {
14457 int i;
14458 int attempt = 0;
14459 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14460 #endif
14461 }
14462
14463 -static void __machine_emergency_restart(int emergency)
14464 +static __noreturn void __machine_emergency_restart(int emergency)
14465 {
14466 reboot_emergency = emergency;
14467 machine_ops.emergency_restart();
14468 }
14469
14470 -static void native_machine_restart(char *__unused)
14471 +static __noreturn void native_machine_restart(char *__unused)
14472 {
14473 printk("machine restart\n");
14474
14475 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14476 __machine_emergency_restart(0);
14477 }
14478
14479 -static void native_machine_halt(void)
14480 +static __noreturn void native_machine_halt(void)
14481 {
14482 /* stop other cpus and apics */
14483 machine_shutdown();
14484 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14485 stop_this_cpu(NULL);
14486 }
14487
14488 -static void native_machine_power_off(void)
14489 +__noreturn static void native_machine_power_off(void)
14490 {
14491 if (pm_power_off) {
14492 if (!reboot_force)
14493 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14494 }
14495 /* a fallback in case there is no PM info available */
14496 tboot_shutdown(TB_SHUTDOWN_HALT);
14497 + unreachable();
14498 }
14499
14500 struct machine_ops machine_ops = {
14501 diff -urNp linux-3.0.3/arch/x86/kernel/setup.c linux-3.0.3/arch/x86/kernel/setup.c
14502 --- linux-3.0.3/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14503 +++ linux-3.0.3/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14504 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14505 * area (640->1Mb) as ram even though it is not.
14506 * take them out.
14507 */
14508 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14509 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14510 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14511 }
14512
14513 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14514
14515 if (!boot_params.hdr.root_flags)
14516 root_mountflags &= ~MS_RDONLY;
14517 - init_mm.start_code = (unsigned long) _text;
14518 - init_mm.end_code = (unsigned long) _etext;
14519 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14520 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14521 init_mm.end_data = (unsigned long) _edata;
14522 init_mm.brk = _brk_end;
14523
14524 - code_resource.start = virt_to_phys(_text);
14525 - code_resource.end = virt_to_phys(_etext)-1;
14526 - data_resource.start = virt_to_phys(_etext);
14527 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14528 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14529 + data_resource.start = virt_to_phys(_sdata);
14530 data_resource.end = virt_to_phys(_edata)-1;
14531 bss_resource.start = virt_to_phys(&__bss_start);
14532 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14533 diff -urNp linux-3.0.3/arch/x86/kernel/setup_percpu.c linux-3.0.3/arch/x86/kernel/setup_percpu.c
14534 --- linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14535 +++ linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14536 @@ -21,19 +21,17 @@
14537 #include <asm/cpu.h>
14538 #include <asm/stackprotector.h>
14539
14540 -DEFINE_PER_CPU(int, cpu_number);
14541 +#ifdef CONFIG_SMP
14542 +DEFINE_PER_CPU(unsigned int, cpu_number);
14543 EXPORT_PER_CPU_SYMBOL(cpu_number);
14544 +#endif
14545
14546 -#ifdef CONFIG_X86_64
14547 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14548 -#else
14549 -#define BOOT_PERCPU_OFFSET 0
14550 -#endif
14551
14552 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14553 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14554
14555 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14556 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14557 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14558 };
14559 EXPORT_SYMBOL(__per_cpu_offset);
14560 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14561 {
14562 #ifdef CONFIG_X86_32
14563 struct desc_struct gdt;
14564 + unsigned long base = per_cpu_offset(cpu);
14565
14566 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14567 - 0x2 | DESCTYPE_S, 0x8);
14568 - gdt.s = 1;
14569 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14570 + 0x83 | DESCTYPE_S, 0xC);
14571 write_gdt_entry(get_cpu_gdt_table(cpu),
14572 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14573 #endif
14574 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14575 /* alrighty, percpu areas up and running */
14576 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14577 for_each_possible_cpu(cpu) {
14578 +#ifdef CONFIG_CC_STACKPROTECTOR
14579 +#ifdef CONFIG_X86_32
14580 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14581 +#endif
14582 +#endif
14583 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14584 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14585 per_cpu(cpu_number, cpu) = cpu;
14586 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14587 */
14588 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14589 #endif
14590 +#ifdef CONFIG_CC_STACKPROTECTOR
14591 +#ifdef CONFIG_X86_32
14592 + if (!cpu)
14593 + per_cpu(stack_canary.canary, cpu) = canary;
14594 +#endif
14595 +#endif
14596 /*
14597 * Up to this point, the boot CPU has been using .init.data
14598 * area. Reload any changed state for the boot CPU.
14599 diff -urNp linux-3.0.3/arch/x86/kernel/signal.c linux-3.0.3/arch/x86/kernel/signal.c
14600 --- linux-3.0.3/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14601 +++ linux-3.0.3/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14602 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14603 * Align the stack pointer according to the i386 ABI,
14604 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14605 */
14606 - sp = ((sp + 4) & -16ul) - 4;
14607 + sp = ((sp - 12) & -16ul) - 4;
14608 #else /* !CONFIG_X86_32 */
14609 sp = round_down(sp, 16) - 8;
14610 #endif
14611 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14612 * Return an always-bogus address instead so we will die with SIGSEGV.
14613 */
14614 if (onsigstack && !likely(on_sig_stack(sp)))
14615 - return (void __user *)-1L;
14616 + return (__force void __user *)-1L;
14617
14618 /* save i387 state */
14619 if (used_math() && save_i387_xstate(*fpstate) < 0)
14620 - return (void __user *)-1L;
14621 + return (__force void __user *)-1L;
14622
14623 return (void __user *)sp;
14624 }
14625 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14626 }
14627
14628 if (current->mm->context.vdso)
14629 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14630 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14631 else
14632 - restorer = &frame->retcode;
14633 + restorer = (void __user *)&frame->retcode;
14634 if (ka->sa.sa_flags & SA_RESTORER)
14635 restorer = ka->sa.sa_restorer;
14636
14637 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14638 * reasons and because gdb uses it as a signature to notice
14639 * signal handler stack frames.
14640 */
14641 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14642 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14643
14644 if (err)
14645 return -EFAULT;
14646 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14647 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14648
14649 /* Set up to return from userspace. */
14650 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14651 + if (current->mm->context.vdso)
14652 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14653 + else
14654 + restorer = (void __user *)&frame->retcode;
14655 if (ka->sa.sa_flags & SA_RESTORER)
14656 restorer = ka->sa.sa_restorer;
14657 put_user_ex(restorer, &frame->pretcode);
14658 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14659 * reasons and because gdb uses it as a signature to notice
14660 * signal handler stack frames.
14661 */
14662 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14663 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14664 } put_user_catch(err);
14665
14666 if (err)
14667 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14668 int signr;
14669 sigset_t *oldset;
14670
14671 + pax_track_stack();
14672 +
14673 /*
14674 * We want the common case to go fast, which is why we may in certain
14675 * cases get here from kernel mode. Just return without doing anything
14676 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14677 * X86_32: vm86 regs switched out by assembly code before reaching
14678 * here, so testing against kernel CS suffices.
14679 */
14680 - if (!user_mode(regs))
14681 + if (!user_mode_novm(regs))
14682 return;
14683
14684 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14685 diff -urNp linux-3.0.3/arch/x86/kernel/smpboot.c linux-3.0.3/arch/x86/kernel/smpboot.c
14686 --- linux-3.0.3/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14687 +++ linux-3.0.3/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14688 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14689 set_idle_for_cpu(cpu, c_idle.idle);
14690 do_rest:
14691 per_cpu(current_task, cpu) = c_idle.idle;
14692 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14693 #ifdef CONFIG_X86_32
14694 /* Stack for startup_32 can be just as for start_secondary onwards */
14695 irq_ctx_init(cpu);
14696 #else
14697 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14698 initial_gs = per_cpu_offset(cpu);
14699 - per_cpu(kernel_stack, cpu) =
14700 - (unsigned long)task_stack_page(c_idle.idle) -
14701 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14702 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14703 #endif
14704 +
14705 + pax_open_kernel();
14706 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14707 + pax_close_kernel();
14708 +
14709 initial_code = (unsigned long)start_secondary;
14710 stack_start = c_idle.idle->thread.sp;
14711
14712 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14713
14714 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14715
14716 +#ifdef CONFIG_PAX_PER_CPU_PGD
14717 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14718 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14719 + KERNEL_PGD_PTRS);
14720 +#endif
14721 +
14722 err = do_boot_cpu(apicid, cpu);
14723 if (err) {
14724 pr_debug("do_boot_cpu failed %d\n", err);
14725 diff -urNp linux-3.0.3/arch/x86/kernel/step.c linux-3.0.3/arch/x86/kernel/step.c
14726 --- linux-3.0.3/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14727 +++ linux-3.0.3/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14728 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14729 struct desc_struct *desc;
14730 unsigned long base;
14731
14732 - seg &= ~7UL;
14733 + seg >>= 3;
14734
14735 mutex_lock(&child->mm->context.lock);
14736 - if (unlikely((seg >> 3) >= child->mm->context.size))
14737 + if (unlikely(seg >= child->mm->context.size))
14738 addr = -1L; /* bogus selector, access would fault */
14739 else {
14740 desc = child->mm->context.ldt + seg;
14741 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14742 addr += base;
14743 }
14744 mutex_unlock(&child->mm->context.lock);
14745 - }
14746 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14747 + addr = ktla_ktva(addr);
14748
14749 return addr;
14750 }
14751 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14752 unsigned char opcode[15];
14753 unsigned long addr = convert_ip_to_linear(child, regs);
14754
14755 + if (addr == -EINVAL)
14756 + return 0;
14757 +
14758 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14759 for (i = 0; i < copied; i++) {
14760 switch (opcode[i]) {
14761 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14762
14763 #ifdef CONFIG_X86_64
14764 case 0x40 ... 0x4f:
14765 - if (regs->cs != __USER_CS)
14766 + if ((regs->cs & 0xffff) != __USER_CS)
14767 /* 32-bit mode: register increment */
14768 return 0;
14769 /* 64-bit mode: REX prefix */
14770 diff -urNp linux-3.0.3/arch/x86/kernel/syscall_table_32.S linux-3.0.3/arch/x86/kernel/syscall_table_32.S
14771 --- linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14772 +++ linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14773 @@ -1,3 +1,4 @@
14774 +.section .rodata,"a",@progbits
14775 ENTRY(sys_call_table)
14776 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14777 .long sys_exit
14778 diff -urNp linux-3.0.3/arch/x86/kernel/sys_i386_32.c linux-3.0.3/arch/x86/kernel/sys_i386_32.c
14779 --- linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14780 +++ linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14781 @@ -24,17 +24,224 @@
14782
14783 #include <asm/syscalls.h>
14784
14785 -/*
14786 - * Do a system call from kernel instead of calling sys_execve so we
14787 - * end up with proper pt_regs.
14788 - */
14789 -int kernel_execve(const char *filename,
14790 - const char *const argv[],
14791 - const char *const envp[])
14792 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14793 {
14794 - long __res;
14795 - asm volatile ("int $0x80"
14796 - : "=a" (__res)
14797 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14798 - return __res;
14799 + unsigned long pax_task_size = TASK_SIZE;
14800 +
14801 +#ifdef CONFIG_PAX_SEGMEXEC
14802 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14803 + pax_task_size = SEGMEXEC_TASK_SIZE;
14804 +#endif
14805 +
14806 + if (len > pax_task_size || addr > pax_task_size - len)
14807 + return -EINVAL;
14808 +
14809 + return 0;
14810 +}
14811 +
14812 +unsigned long
14813 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14814 + unsigned long len, unsigned long pgoff, unsigned long flags)
14815 +{
14816 + struct mm_struct *mm = current->mm;
14817 + struct vm_area_struct *vma;
14818 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14819 +
14820 +#ifdef CONFIG_PAX_SEGMEXEC
14821 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14822 + pax_task_size = SEGMEXEC_TASK_SIZE;
14823 +#endif
14824 +
14825 + pax_task_size -= PAGE_SIZE;
14826 +
14827 + if (len > pax_task_size)
14828 + return -ENOMEM;
14829 +
14830 + if (flags & MAP_FIXED)
14831 + return addr;
14832 +
14833 +#ifdef CONFIG_PAX_RANDMMAP
14834 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14835 +#endif
14836 +
14837 + if (addr) {
14838 + addr = PAGE_ALIGN(addr);
14839 + if (pax_task_size - len >= addr) {
14840 + vma = find_vma(mm, addr);
14841 + if (check_heap_stack_gap(vma, addr, len))
14842 + return addr;
14843 + }
14844 + }
14845 + if (len > mm->cached_hole_size) {
14846 + start_addr = addr = mm->free_area_cache;
14847 + } else {
14848 + start_addr = addr = mm->mmap_base;
14849 + mm->cached_hole_size = 0;
14850 + }
14851 +
14852 +#ifdef CONFIG_PAX_PAGEEXEC
14853 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14854 + start_addr = 0x00110000UL;
14855 +
14856 +#ifdef CONFIG_PAX_RANDMMAP
14857 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14858 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14859 +#endif
14860 +
14861 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14862 + start_addr = addr = mm->mmap_base;
14863 + else
14864 + addr = start_addr;
14865 + }
14866 +#endif
14867 +
14868 +full_search:
14869 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14870 + /* At this point: (!vma || addr < vma->vm_end). */
14871 + if (pax_task_size - len < addr) {
14872 + /*
14873 + * Start a new search - just in case we missed
14874 + * some holes.
14875 + */
14876 + if (start_addr != mm->mmap_base) {
14877 + start_addr = addr = mm->mmap_base;
14878 + mm->cached_hole_size = 0;
14879 + goto full_search;
14880 + }
14881 + return -ENOMEM;
14882 + }
14883 + if (check_heap_stack_gap(vma, addr, len))
14884 + break;
14885 + if (addr + mm->cached_hole_size < vma->vm_start)
14886 + mm->cached_hole_size = vma->vm_start - addr;
14887 + addr = vma->vm_end;
14888 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14889 + start_addr = addr = mm->mmap_base;
14890 + mm->cached_hole_size = 0;
14891 + goto full_search;
14892 + }
14893 + }
14894 +
14895 + /*
14896 + * Remember the place where we stopped the search:
14897 + */
14898 + mm->free_area_cache = addr + len;
14899 + return addr;
14900 +}
14901 +
14902 +unsigned long
14903 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14904 + const unsigned long len, const unsigned long pgoff,
14905 + const unsigned long flags)
14906 +{
14907 + struct vm_area_struct *vma;
14908 + struct mm_struct *mm = current->mm;
14909 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14910 +
14911 +#ifdef CONFIG_PAX_SEGMEXEC
14912 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14913 + pax_task_size = SEGMEXEC_TASK_SIZE;
14914 +#endif
14915 +
14916 + pax_task_size -= PAGE_SIZE;
14917 +
14918 + /* requested length too big for entire address space */
14919 + if (len > pax_task_size)
14920 + return -ENOMEM;
14921 +
14922 + if (flags & MAP_FIXED)
14923 + return addr;
14924 +
14925 +#ifdef CONFIG_PAX_PAGEEXEC
14926 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14927 + goto bottomup;
14928 +#endif
14929 +
14930 +#ifdef CONFIG_PAX_RANDMMAP
14931 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14932 +#endif
14933 +
14934 + /* requesting a specific address */
14935 + if (addr) {
14936 + addr = PAGE_ALIGN(addr);
14937 + if (pax_task_size - len >= addr) {
14938 + vma = find_vma(mm, addr);
14939 + if (check_heap_stack_gap(vma, addr, len))
14940 + return addr;
14941 + }
14942 + }
14943 +
14944 + /* check if free_area_cache is useful for us */
14945 + if (len <= mm->cached_hole_size) {
14946 + mm->cached_hole_size = 0;
14947 + mm->free_area_cache = mm->mmap_base;
14948 + }
14949 +
14950 + /* either no address requested or can't fit in requested address hole */
14951 + addr = mm->free_area_cache;
14952 +
14953 + /* make sure it can fit in the remaining address space */
14954 + if (addr > len) {
14955 + vma = find_vma(mm, addr-len);
14956 + if (check_heap_stack_gap(vma, addr - len, len))
14957 + /* remember the address as a hint for next time */
14958 + return (mm->free_area_cache = addr-len);
14959 + }
14960 +
14961 + if (mm->mmap_base < len)
14962 + goto bottomup;
14963 +
14964 + addr = mm->mmap_base-len;
14965 +
14966 + do {
14967 + /*
14968 + * Lookup failure means no vma is above this address,
14969 + * else if new region fits below vma->vm_start,
14970 + * return with success:
14971 + */
14972 + vma = find_vma(mm, addr);
14973 + if (check_heap_stack_gap(vma, addr, len))
14974 + /* remember the address as a hint for next time */
14975 + return (mm->free_area_cache = addr);
14976 +
14977 + /* remember the largest hole we saw so far */
14978 + if (addr + mm->cached_hole_size < vma->vm_start)
14979 + mm->cached_hole_size = vma->vm_start - addr;
14980 +
14981 + /* try just below the current vma->vm_start */
14982 + addr = skip_heap_stack_gap(vma, len);
14983 + } while (!IS_ERR_VALUE(addr));
14984 +
14985 +bottomup:
14986 + /*
14987 + * A failed mmap() very likely causes application failure,
14988 + * so fall back to the bottom-up function here. This scenario
14989 + * can happen with large stack limits and large mmap()
14990 + * allocations.
14991 + */
14992 +
14993 +#ifdef CONFIG_PAX_SEGMEXEC
14994 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14995 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
14996 + else
14997 +#endif
14998 +
14999 + mm->mmap_base = TASK_UNMAPPED_BASE;
15000 +
15001 +#ifdef CONFIG_PAX_RANDMMAP
15002 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15003 + mm->mmap_base += mm->delta_mmap;
15004 +#endif
15005 +
15006 + mm->free_area_cache = mm->mmap_base;
15007 + mm->cached_hole_size = ~0UL;
15008 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15009 + /*
15010 + * Restore the topdown base:
15011 + */
15012 + mm->mmap_base = base;
15013 + mm->free_area_cache = base;
15014 + mm->cached_hole_size = ~0UL;
15015 +
15016 + return addr;
15017 }
15018 diff -urNp linux-3.0.3/arch/x86/kernel/sys_x86_64.c linux-3.0.3/arch/x86/kernel/sys_x86_64.c
15019 --- linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15020 +++ linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15021 @@ -32,8 +32,8 @@ out:
15022 return error;
15023 }
15024
15025 -static void find_start_end(unsigned long flags, unsigned long *begin,
15026 - unsigned long *end)
15027 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15028 + unsigned long *begin, unsigned long *end)
15029 {
15030 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15031 unsigned long new_begin;
15032 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15033 *begin = new_begin;
15034 }
15035 } else {
15036 - *begin = TASK_UNMAPPED_BASE;
15037 + *begin = mm->mmap_base;
15038 *end = TASK_SIZE;
15039 }
15040 }
15041 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15042 if (flags & MAP_FIXED)
15043 return addr;
15044
15045 - find_start_end(flags, &begin, &end);
15046 + find_start_end(mm, flags, &begin, &end);
15047
15048 if (len > end)
15049 return -ENOMEM;
15050
15051 +#ifdef CONFIG_PAX_RANDMMAP
15052 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15053 +#endif
15054 +
15055 if (addr) {
15056 addr = PAGE_ALIGN(addr);
15057 vma = find_vma(mm, addr);
15058 - if (end - len >= addr &&
15059 - (!vma || addr + len <= vma->vm_start))
15060 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15061 return addr;
15062 }
15063 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15064 @@ -106,7 +109,7 @@ full_search:
15065 }
15066 return -ENOMEM;
15067 }
15068 - if (!vma || addr + len <= vma->vm_start) {
15069 + if (check_heap_stack_gap(vma, addr, len)) {
15070 /*
15071 * Remember the place where we stopped the search:
15072 */
15073 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15074 {
15075 struct vm_area_struct *vma;
15076 struct mm_struct *mm = current->mm;
15077 - unsigned long addr = addr0;
15078 + unsigned long base = mm->mmap_base, addr = addr0;
15079
15080 /* requested length too big for entire address space */
15081 if (len > TASK_SIZE)
15082 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15083 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15084 goto bottomup;
15085
15086 +#ifdef CONFIG_PAX_RANDMMAP
15087 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15088 +#endif
15089 +
15090 /* requesting a specific address */
15091 if (addr) {
15092 addr = PAGE_ALIGN(addr);
15093 - vma = find_vma(mm, addr);
15094 - if (TASK_SIZE - len >= addr &&
15095 - (!vma || addr + len <= vma->vm_start))
15096 - return addr;
15097 + if (TASK_SIZE - len >= addr) {
15098 + vma = find_vma(mm, addr);
15099 + if (check_heap_stack_gap(vma, addr, len))
15100 + return addr;
15101 + }
15102 }
15103
15104 /* check if free_area_cache is useful for us */
15105 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15106 /* make sure it can fit in the remaining address space */
15107 if (addr > len) {
15108 vma = find_vma(mm, addr-len);
15109 - if (!vma || addr <= vma->vm_start)
15110 + if (check_heap_stack_gap(vma, addr - len, len))
15111 /* remember the address as a hint for next time */
15112 return mm->free_area_cache = addr-len;
15113 }
15114 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15115 * return with success:
15116 */
15117 vma = find_vma(mm, addr);
15118 - if (!vma || addr+len <= vma->vm_start)
15119 + if (check_heap_stack_gap(vma, addr, len))
15120 /* remember the address as a hint for next time */
15121 return mm->free_area_cache = addr;
15122
15123 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15124 mm->cached_hole_size = vma->vm_start - addr;
15125
15126 /* try just below the current vma->vm_start */
15127 - addr = vma->vm_start-len;
15128 - } while (len < vma->vm_start);
15129 + addr = skip_heap_stack_gap(vma, len);
15130 + } while (!IS_ERR_VALUE(addr));
15131
15132 bottomup:
15133 /*
15134 @@ -198,13 +206,21 @@ bottomup:
15135 * can happen with large stack limits and large mmap()
15136 * allocations.
15137 */
15138 + mm->mmap_base = TASK_UNMAPPED_BASE;
15139 +
15140 +#ifdef CONFIG_PAX_RANDMMAP
15141 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15142 + mm->mmap_base += mm->delta_mmap;
15143 +#endif
15144 +
15145 + mm->free_area_cache = mm->mmap_base;
15146 mm->cached_hole_size = ~0UL;
15147 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15148 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15149 /*
15150 * Restore the topdown base:
15151 */
15152 - mm->free_area_cache = mm->mmap_base;
15153 + mm->mmap_base = base;
15154 + mm->free_area_cache = base;
15155 mm->cached_hole_size = ~0UL;
15156
15157 return addr;
15158 diff -urNp linux-3.0.3/arch/x86/kernel/tboot.c linux-3.0.3/arch/x86/kernel/tboot.c
15159 --- linux-3.0.3/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15160 +++ linux-3.0.3/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15161 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15162
15163 void tboot_shutdown(u32 shutdown_type)
15164 {
15165 - void (*shutdown)(void);
15166 + void (* __noreturn shutdown)(void);
15167
15168 if (!tboot_enabled())
15169 return;
15170 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15171
15172 switch_to_tboot_pt();
15173
15174 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15175 + shutdown = (void *)tboot->shutdown_entry;
15176 shutdown();
15177
15178 /* should not reach here */
15179 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15180 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15181 }
15182
15183 -static atomic_t ap_wfs_count;
15184 +static atomic_unchecked_t ap_wfs_count;
15185
15186 static int tboot_wait_for_aps(int num_aps)
15187 {
15188 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15189 {
15190 switch (action) {
15191 case CPU_DYING:
15192 - atomic_inc(&ap_wfs_count);
15193 + atomic_inc_unchecked(&ap_wfs_count);
15194 if (num_online_cpus() == 1)
15195 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15196 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15197 return NOTIFY_BAD;
15198 break;
15199 }
15200 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15201
15202 tboot_create_trampoline();
15203
15204 - atomic_set(&ap_wfs_count, 0);
15205 + atomic_set_unchecked(&ap_wfs_count, 0);
15206 register_hotcpu_notifier(&tboot_cpu_notifier);
15207 return 0;
15208 }
15209 diff -urNp linux-3.0.3/arch/x86/kernel/time.c linux-3.0.3/arch/x86/kernel/time.c
15210 --- linux-3.0.3/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15211 +++ linux-3.0.3/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15212 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15213 {
15214 unsigned long pc = instruction_pointer(regs);
15215
15216 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15217 + if (!user_mode(regs) && in_lock_functions(pc)) {
15218 #ifdef CONFIG_FRAME_POINTER
15219 - return *(unsigned long *)(regs->bp + sizeof(long));
15220 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15221 #else
15222 unsigned long *sp =
15223 (unsigned long *)kernel_stack_pointer(regs);
15224 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15225 * or above a saved flags. Eflags has bits 22-31 zero,
15226 * kernel addresses don't.
15227 */
15228 +
15229 +#ifdef CONFIG_PAX_KERNEXEC
15230 + return ktla_ktva(sp[0]);
15231 +#else
15232 if (sp[0] >> 22)
15233 return sp[0];
15234 if (sp[1] >> 22)
15235 return sp[1];
15236 #endif
15237 +
15238 +#endif
15239 }
15240 return pc;
15241 }
15242 diff -urNp linux-3.0.3/arch/x86/kernel/tls.c linux-3.0.3/arch/x86/kernel/tls.c
15243 --- linux-3.0.3/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15244 +++ linux-3.0.3/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15245 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15246 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15247 return -EINVAL;
15248
15249 +#ifdef CONFIG_PAX_SEGMEXEC
15250 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15251 + return -EINVAL;
15252 +#endif
15253 +
15254 set_tls_desc(p, idx, &info, 1);
15255
15256 return 0;
15257 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_32.S linux-3.0.3/arch/x86/kernel/trampoline_32.S
15258 --- linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15259 +++ linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15260 @@ -32,6 +32,12 @@
15261 #include <asm/segment.h>
15262 #include <asm/page_types.h>
15263
15264 +#ifdef CONFIG_PAX_KERNEXEC
15265 +#define ta(X) (X)
15266 +#else
15267 +#define ta(X) ((X) - __PAGE_OFFSET)
15268 +#endif
15269 +
15270 #ifdef CONFIG_SMP
15271
15272 .section ".x86_trampoline","a"
15273 @@ -62,7 +68,7 @@ r_base = .
15274 inc %ax # protected mode (PE) bit
15275 lmsw %ax # into protected mode
15276 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15277 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15278 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15279
15280 # These need to be in the same 64K segment as the above;
15281 # hence we don't use the boot_gdt_descr defined in head.S
15282 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_64.S linux-3.0.3/arch/x86/kernel/trampoline_64.S
15283 --- linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15284 +++ linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15285 @@ -90,7 +90,7 @@ startup_32:
15286 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15287 movl %eax, %ds
15288
15289 - movl $X86_CR4_PAE, %eax
15290 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15291 movl %eax, %cr4 # Enable PAE mode
15292
15293 # Setup trampoline 4 level pagetables
15294 @@ -138,7 +138,7 @@ tidt:
15295 # so the kernel can live anywhere
15296 .balign 4
15297 tgdt:
15298 - .short tgdt_end - tgdt # gdt limit
15299 + .short tgdt_end - tgdt - 1 # gdt limit
15300 .long tgdt - r_base
15301 .short 0
15302 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15303 diff -urNp linux-3.0.3/arch/x86/kernel/traps.c linux-3.0.3/arch/x86/kernel/traps.c
15304 --- linux-3.0.3/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15305 +++ linux-3.0.3/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15306 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15307
15308 /* Do we ignore FPU interrupts ? */
15309 char ignore_fpu_irq;
15310 -
15311 -/*
15312 - * The IDT has to be page-aligned to simplify the Pentium
15313 - * F0 0F bug workaround.
15314 - */
15315 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15316 #endif
15317
15318 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15319 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15320 }
15321
15322 static void __kprobes
15323 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15324 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15325 long error_code, siginfo_t *info)
15326 {
15327 struct task_struct *tsk = current;
15328
15329 #ifdef CONFIG_X86_32
15330 - if (regs->flags & X86_VM_MASK) {
15331 + if (v8086_mode(regs)) {
15332 /*
15333 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15334 * On nmi (interrupt 2), do_trap should not be called.
15335 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15336 }
15337 #endif
15338
15339 - if (!user_mode(regs))
15340 + if (!user_mode_novm(regs))
15341 goto kernel_trap;
15342
15343 #ifdef CONFIG_X86_32
15344 @@ -157,7 +151,7 @@ trap_signal:
15345 printk_ratelimit()) {
15346 printk(KERN_INFO
15347 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15348 - tsk->comm, tsk->pid, str,
15349 + tsk->comm, task_pid_nr(tsk), str,
15350 regs->ip, regs->sp, error_code);
15351 print_vma_addr(" in ", regs->ip);
15352 printk("\n");
15353 @@ -174,8 +168,20 @@ kernel_trap:
15354 if (!fixup_exception(regs)) {
15355 tsk->thread.error_code = error_code;
15356 tsk->thread.trap_no = trapnr;
15357 +
15358 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15359 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15360 + str = "PAX: suspicious stack segment fault";
15361 +#endif
15362 +
15363 die(str, regs, error_code);
15364 }
15365 +
15366 +#ifdef CONFIG_PAX_REFCOUNT
15367 + if (trapnr == 4)
15368 + pax_report_refcount_overflow(regs);
15369 +#endif
15370 +
15371 return;
15372
15373 #ifdef CONFIG_X86_32
15374 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15375 conditional_sti(regs);
15376
15377 #ifdef CONFIG_X86_32
15378 - if (regs->flags & X86_VM_MASK)
15379 + if (v8086_mode(regs))
15380 goto gp_in_vm86;
15381 #endif
15382
15383 tsk = current;
15384 - if (!user_mode(regs))
15385 + if (!user_mode_novm(regs))
15386 goto gp_in_kernel;
15387
15388 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15389 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15390 + struct mm_struct *mm = tsk->mm;
15391 + unsigned long limit;
15392 +
15393 + down_write(&mm->mmap_sem);
15394 + limit = mm->context.user_cs_limit;
15395 + if (limit < TASK_SIZE) {
15396 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15397 + up_write(&mm->mmap_sem);
15398 + return;
15399 + }
15400 + up_write(&mm->mmap_sem);
15401 + }
15402 +#endif
15403 +
15404 tsk->thread.error_code = error_code;
15405 tsk->thread.trap_no = 13;
15406
15407 @@ -304,6 +326,13 @@ gp_in_kernel:
15408 if (notify_die(DIE_GPF, "general protection fault", regs,
15409 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15410 return;
15411 +
15412 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15413 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15414 + die("PAX: suspicious general protection fault", regs, error_code);
15415 + else
15416 +#endif
15417 +
15418 die("general protection fault", regs, error_code);
15419 }
15420
15421 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15422 dotraplinkage notrace __kprobes void
15423 do_nmi(struct pt_regs *regs, long error_code)
15424 {
15425 +
15426 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15427 + if (!user_mode(regs)) {
15428 + unsigned long cs = regs->cs & 0xFFFF;
15429 + unsigned long ip = ktva_ktla(regs->ip);
15430 +
15431 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15432 + regs->ip = ip;
15433 + }
15434 +#endif
15435 +
15436 nmi_enter();
15437
15438 inc_irq_stat(__nmi_count);
15439 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15440 /* It's safe to allow irq's after DR6 has been saved */
15441 preempt_conditional_sti(regs);
15442
15443 - if (regs->flags & X86_VM_MASK) {
15444 + if (v8086_mode(regs)) {
15445 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15446 error_code, 1);
15447 preempt_conditional_cli(regs);
15448 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15449 * We already checked v86 mode above, so we can check for kernel mode
15450 * by just checking the CPL of CS.
15451 */
15452 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15453 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15454 tsk->thread.debugreg6 &= ~DR_STEP;
15455 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15456 regs->flags &= ~X86_EFLAGS_TF;
15457 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15458 return;
15459 conditional_sti(regs);
15460
15461 - if (!user_mode_vm(regs))
15462 + if (!user_mode(regs))
15463 {
15464 if (!fixup_exception(regs)) {
15465 task->thread.error_code = error_code;
15466 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15467 void __math_state_restore(void)
15468 {
15469 struct thread_info *thread = current_thread_info();
15470 - struct task_struct *tsk = thread->task;
15471 + struct task_struct *tsk = current;
15472
15473 /*
15474 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15475 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15476 */
15477 asmlinkage void math_state_restore(void)
15478 {
15479 - struct thread_info *thread = current_thread_info();
15480 - struct task_struct *tsk = thread->task;
15481 + struct task_struct *tsk = current;
15482
15483 if (!tsk_used_math(tsk)) {
15484 local_irq_enable();
15485 diff -urNp linux-3.0.3/arch/x86/kernel/verify_cpu.S linux-3.0.3/arch/x86/kernel/verify_cpu.S
15486 --- linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15487 +++ linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15488 @@ -20,6 +20,7 @@
15489 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15490 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15491 * arch/x86/kernel/head_32.S: processor startup
15492 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15493 *
15494 * verify_cpu, returns the status of longmode and SSE in register %eax.
15495 * 0: Success 1: Failure
15496 diff -urNp linux-3.0.3/arch/x86/kernel/vm86_32.c linux-3.0.3/arch/x86/kernel/vm86_32.c
15497 --- linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15498 +++ linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15499 @@ -41,6 +41,7 @@
15500 #include <linux/ptrace.h>
15501 #include <linux/audit.h>
15502 #include <linux/stddef.h>
15503 +#include <linux/grsecurity.h>
15504
15505 #include <asm/uaccess.h>
15506 #include <asm/io.h>
15507 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15508 do_exit(SIGSEGV);
15509 }
15510
15511 - tss = &per_cpu(init_tss, get_cpu());
15512 + tss = init_tss + get_cpu();
15513 current->thread.sp0 = current->thread.saved_sp0;
15514 current->thread.sysenter_cs = __KERNEL_CS;
15515 load_sp0(tss, &current->thread);
15516 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15517 struct task_struct *tsk;
15518 int tmp, ret = -EPERM;
15519
15520 +#ifdef CONFIG_GRKERNSEC_VM86
15521 + if (!capable(CAP_SYS_RAWIO)) {
15522 + gr_handle_vm86();
15523 + goto out;
15524 + }
15525 +#endif
15526 +
15527 tsk = current;
15528 if (tsk->thread.saved_sp0)
15529 goto out;
15530 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15531 int tmp, ret;
15532 struct vm86plus_struct __user *v86;
15533
15534 +#ifdef CONFIG_GRKERNSEC_VM86
15535 + if (!capable(CAP_SYS_RAWIO)) {
15536 + gr_handle_vm86();
15537 + ret = -EPERM;
15538 + goto out;
15539 + }
15540 +#endif
15541 +
15542 tsk = current;
15543 switch (cmd) {
15544 case VM86_REQUEST_IRQ:
15545 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15546 tsk->thread.saved_fs = info->regs32->fs;
15547 tsk->thread.saved_gs = get_user_gs(info->regs32);
15548
15549 - tss = &per_cpu(init_tss, get_cpu());
15550 + tss = init_tss + get_cpu();
15551 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15552 if (cpu_has_sep)
15553 tsk->thread.sysenter_cs = 0;
15554 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15555 goto cannot_handle;
15556 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15557 goto cannot_handle;
15558 - intr_ptr = (unsigned long __user *) (i << 2);
15559 + intr_ptr = (__force unsigned long __user *) (i << 2);
15560 if (get_user(segoffs, intr_ptr))
15561 goto cannot_handle;
15562 if ((segoffs >> 16) == BIOSSEG)
15563 diff -urNp linux-3.0.3/arch/x86/kernel/vmlinux.lds.S linux-3.0.3/arch/x86/kernel/vmlinux.lds.S
15564 --- linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15565 +++ linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15566 @@ -26,6 +26,13 @@
15567 #include <asm/page_types.h>
15568 #include <asm/cache.h>
15569 #include <asm/boot.h>
15570 +#include <asm/segment.h>
15571 +
15572 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15573 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15574 +#else
15575 +#define __KERNEL_TEXT_OFFSET 0
15576 +#endif
15577
15578 #undef i386 /* in case the preprocessor is a 32bit one */
15579
15580 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15581
15582 PHDRS {
15583 text PT_LOAD FLAGS(5); /* R_E */
15584 +#ifdef CONFIG_X86_32
15585 + module PT_LOAD FLAGS(5); /* R_E */
15586 +#endif
15587 +#ifdef CONFIG_XEN
15588 + rodata PT_LOAD FLAGS(5); /* R_E */
15589 +#else
15590 + rodata PT_LOAD FLAGS(4); /* R__ */
15591 +#endif
15592 data PT_LOAD FLAGS(6); /* RW_ */
15593 #ifdef CONFIG_X86_64
15594 user PT_LOAD FLAGS(5); /* R_E */
15595 +#endif
15596 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15597 #ifdef CONFIG_SMP
15598 percpu PT_LOAD FLAGS(6); /* RW_ */
15599 #endif
15600 + text.init PT_LOAD FLAGS(5); /* R_E */
15601 + text.exit PT_LOAD FLAGS(5); /* R_E */
15602 init PT_LOAD FLAGS(7); /* RWE */
15603 -#endif
15604 note PT_NOTE FLAGS(0); /* ___ */
15605 }
15606
15607 SECTIONS
15608 {
15609 #ifdef CONFIG_X86_32
15610 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15611 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15612 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15613 #else
15614 - . = __START_KERNEL;
15615 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15616 + . = __START_KERNEL;
15617 #endif
15618
15619 /* Text and read-only data */
15620 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15621 - _text = .;
15622 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15623 /* bootstrapping code */
15624 +#ifdef CONFIG_X86_32
15625 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15626 +#else
15627 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15628 +#endif
15629 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15630 + _text = .;
15631 HEAD_TEXT
15632 #ifdef CONFIG_X86_32
15633 . = ALIGN(PAGE_SIZE);
15634 @@ -109,13 +131,47 @@ SECTIONS
15635 IRQENTRY_TEXT
15636 *(.fixup)
15637 *(.gnu.warning)
15638 - /* End of text section */
15639 - _etext = .;
15640 } :text = 0x9090
15641
15642 - NOTES :text :note
15643 + . += __KERNEL_TEXT_OFFSET;
15644 +
15645 +#ifdef CONFIG_X86_32
15646 + . = ALIGN(PAGE_SIZE);
15647 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15648 +
15649 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15650 + MODULES_EXEC_VADDR = .;
15651 + BYTE(0)
15652 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15653 + . = ALIGN(HPAGE_SIZE);
15654 + MODULES_EXEC_END = . - 1;
15655 +#endif
15656 +
15657 + } :module
15658 +#endif
15659 +
15660 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15661 + /* End of text section */
15662 + _etext = . - __KERNEL_TEXT_OFFSET;
15663 + }
15664 +
15665 +#ifdef CONFIG_X86_32
15666 + . = ALIGN(PAGE_SIZE);
15667 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15668 + *(.idt)
15669 + . = ALIGN(PAGE_SIZE);
15670 + *(.empty_zero_page)
15671 + *(.initial_pg_fixmap)
15672 + *(.initial_pg_pmd)
15673 + *(.initial_page_table)
15674 + *(.swapper_pg_dir)
15675 + } :rodata
15676 +#endif
15677 +
15678 + . = ALIGN(PAGE_SIZE);
15679 + NOTES :rodata :note
15680
15681 - EXCEPTION_TABLE(16) :text = 0x9090
15682 + EXCEPTION_TABLE(16) :rodata
15683
15684 #if defined(CONFIG_DEBUG_RODATA)
15685 /* .text should occupy whole number of pages */
15686 @@ -127,16 +183,20 @@ SECTIONS
15687
15688 /* Data */
15689 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15690 +
15691 +#ifdef CONFIG_PAX_KERNEXEC
15692 + . = ALIGN(HPAGE_SIZE);
15693 +#else
15694 + . = ALIGN(PAGE_SIZE);
15695 +#endif
15696 +
15697 /* Start of data section */
15698 _sdata = .;
15699
15700 /* init_task */
15701 INIT_TASK_DATA(THREAD_SIZE)
15702
15703 -#ifdef CONFIG_X86_32
15704 - /* 32 bit has nosave before _edata */
15705 NOSAVE_DATA
15706 -#endif
15707
15708 PAGE_ALIGNED_DATA(PAGE_SIZE)
15709
15710 @@ -208,12 +268,19 @@ SECTIONS
15711 #endif /* CONFIG_X86_64 */
15712
15713 /* Init code and data - will be freed after init */
15714 - . = ALIGN(PAGE_SIZE);
15715 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15716 + BYTE(0)
15717 +
15718 +#ifdef CONFIG_PAX_KERNEXEC
15719 + . = ALIGN(HPAGE_SIZE);
15720 +#else
15721 + . = ALIGN(PAGE_SIZE);
15722 +#endif
15723 +
15724 __init_begin = .; /* paired with __init_end */
15725 - }
15726 + } :init.begin
15727
15728 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15729 +#ifdef CONFIG_SMP
15730 /*
15731 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15732 * output PHDR, so the next output section - .init.text - should
15733 @@ -222,12 +289,27 @@ SECTIONS
15734 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15735 #endif
15736
15737 - INIT_TEXT_SECTION(PAGE_SIZE)
15738 -#ifdef CONFIG_X86_64
15739 - :init
15740 -#endif
15741 + . = ALIGN(PAGE_SIZE);
15742 + init_begin = .;
15743 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15744 + VMLINUX_SYMBOL(_sinittext) = .;
15745 + INIT_TEXT
15746 + VMLINUX_SYMBOL(_einittext) = .;
15747 + . = ALIGN(PAGE_SIZE);
15748 + } :text.init
15749
15750 - INIT_DATA_SECTION(16)
15751 + /*
15752 + * .exit.text is discard at runtime, not link time, to deal with
15753 + * references from .altinstructions and .eh_frame
15754 + */
15755 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15756 + EXIT_TEXT
15757 + . = ALIGN(16);
15758 + } :text.exit
15759 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15760 +
15761 + . = ALIGN(PAGE_SIZE);
15762 + INIT_DATA_SECTION(16) :init
15763
15764 /*
15765 * Code and data for a variety of lowlevel trampolines, to be
15766 @@ -301,19 +383,12 @@ SECTIONS
15767 }
15768
15769 . = ALIGN(8);
15770 - /*
15771 - * .exit.text is discard at runtime, not link time, to deal with
15772 - * references from .altinstructions and .eh_frame
15773 - */
15774 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15775 - EXIT_TEXT
15776 - }
15777
15778 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15779 EXIT_DATA
15780 }
15781
15782 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15783 +#ifndef CONFIG_SMP
15784 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15785 #endif
15786
15787 @@ -332,16 +407,10 @@ SECTIONS
15788 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15789 __smp_locks = .;
15790 *(.smp_locks)
15791 - . = ALIGN(PAGE_SIZE);
15792 __smp_locks_end = .;
15793 + . = ALIGN(PAGE_SIZE);
15794 }
15795
15796 -#ifdef CONFIG_X86_64
15797 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15798 - NOSAVE_DATA
15799 - }
15800 -#endif
15801 -
15802 /* BSS */
15803 . = ALIGN(PAGE_SIZE);
15804 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15805 @@ -357,6 +426,7 @@ SECTIONS
15806 __brk_base = .;
15807 . += 64 * 1024; /* 64k alignment slop space */
15808 *(.brk_reservation) /* areas brk users have reserved */
15809 + . = ALIGN(HPAGE_SIZE);
15810 __brk_limit = .;
15811 }
15812
15813 @@ -383,13 +453,12 @@ SECTIONS
15814 * for the boot processor.
15815 */
15816 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15817 -INIT_PER_CPU(gdt_page);
15818 INIT_PER_CPU(irq_stack_union);
15819
15820 /*
15821 * Build-time check on the image size:
15822 */
15823 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15824 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15825 "kernel image bigger than KERNEL_IMAGE_SIZE");
15826
15827 #ifdef CONFIG_SMP
15828 diff -urNp linux-3.0.3/arch/x86/kernel/vsyscall_64.c linux-3.0.3/arch/x86/kernel/vsyscall_64.c
15829 --- linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15830 +++ linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15831 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15832 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15833 {
15834 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15835 - .sysctl_enabled = 1,
15836 + .sysctl_enabled = 0,
15837 };
15838
15839 void update_vsyscall_tz(void)
15840 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15841 static ctl_table kernel_table2[] = {
15842 { .procname = "vsyscall64",
15843 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15844 - .mode = 0644,
15845 + .mode = 0444,
15846 .proc_handler = proc_dointvec },
15847 {}
15848 };
15849 diff -urNp linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c
15850 --- linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15851 +++ linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15852 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15853 EXPORT_SYMBOL(copy_user_generic_string);
15854 EXPORT_SYMBOL(copy_user_generic_unrolled);
15855 EXPORT_SYMBOL(__copy_user_nocache);
15856 -EXPORT_SYMBOL(_copy_from_user);
15857 -EXPORT_SYMBOL(_copy_to_user);
15858
15859 EXPORT_SYMBOL(copy_page);
15860 EXPORT_SYMBOL(clear_page);
15861 diff -urNp linux-3.0.3/arch/x86/kernel/xsave.c linux-3.0.3/arch/x86/kernel/xsave.c
15862 --- linux-3.0.3/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15863 +++ linux-3.0.3/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15864 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15865 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15866 return -EINVAL;
15867
15868 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15869 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15870 fx_sw_user->extended_size -
15871 FP_XSTATE_MAGIC2_SIZE));
15872 if (err)
15873 @@ -267,7 +267,7 @@ fx_only:
15874 * the other extended state.
15875 */
15876 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15877 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15878 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15879 }
15880
15881 /*
15882 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15883 if (use_xsave())
15884 err = restore_user_xstate(buf);
15885 else
15886 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15887 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15888 buf);
15889 if (unlikely(err)) {
15890 /*
15891 diff -urNp linux-3.0.3/arch/x86/kvm/emulate.c linux-3.0.3/arch/x86/kvm/emulate.c
15892 --- linux-3.0.3/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15893 +++ linux-3.0.3/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15894 @@ -96,7 +96,7 @@
15895 #define Src2ImmByte (2<<29)
15896 #define Src2One (3<<29)
15897 #define Src2Imm (4<<29)
15898 -#define Src2Mask (7<<29)
15899 +#define Src2Mask (7U<<29)
15900
15901 #define X2(x...) x, x
15902 #define X3(x...) X2(x), x
15903 @@ -207,6 +207,7 @@ struct gprefix {
15904
15905 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15906 do { \
15907 + unsigned long _tmp; \
15908 __asm__ __volatile__ ( \
15909 _PRE_EFLAGS("0", "4", "2") \
15910 _op _suffix " %"_x"3,%1; " \
15911 @@ -220,8 +221,6 @@ struct gprefix {
15912 /* Raw emulation: instruction has two explicit operands. */
15913 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15914 do { \
15915 - unsigned long _tmp; \
15916 - \
15917 switch ((_dst).bytes) { \
15918 case 2: \
15919 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15920 @@ -237,7 +236,6 @@ struct gprefix {
15921
15922 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15923 do { \
15924 - unsigned long _tmp; \
15925 switch ((_dst).bytes) { \
15926 case 1: \
15927 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15928 diff -urNp linux-3.0.3/arch/x86/kvm/lapic.c linux-3.0.3/arch/x86/kvm/lapic.c
15929 --- linux-3.0.3/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15930 +++ linux-3.0.3/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15931 @@ -53,7 +53,7 @@
15932 #define APIC_BUS_CYCLE_NS 1
15933
15934 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15935 -#define apic_debug(fmt, arg...)
15936 +#define apic_debug(fmt, arg...) do {} while (0)
15937
15938 #define APIC_LVT_NUM 6
15939 /* 14 is the version for Xeon and Pentium 8.4.8*/
15940 diff -urNp linux-3.0.3/arch/x86/kvm/mmu.c linux-3.0.3/arch/x86/kvm/mmu.c
15941 --- linux-3.0.3/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15942 +++ linux-3.0.3/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15943 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15944
15945 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15946
15947 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15948 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15949
15950 /*
15951 * Assume that the pte write on a page table of the same type
15952 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15953 }
15954
15955 spin_lock(&vcpu->kvm->mmu_lock);
15956 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15957 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15958 gentry = 0;
15959 kvm_mmu_free_some_pages(vcpu);
15960 ++vcpu->kvm->stat.mmu_pte_write;
15961 diff -urNp linux-3.0.3/arch/x86/kvm/paging_tmpl.h linux-3.0.3/arch/x86/kvm/paging_tmpl.h
15962 --- linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15963 +++ linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15964 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15965 unsigned long mmu_seq;
15966 bool map_writable;
15967
15968 + pax_track_stack();
15969 +
15970 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15971
15972 r = mmu_topup_memory_caches(vcpu);
15973 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15974 if (need_flush)
15975 kvm_flush_remote_tlbs(vcpu->kvm);
15976
15977 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15978 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15979
15980 spin_unlock(&vcpu->kvm->mmu_lock);
15981
15982 diff -urNp linux-3.0.3/arch/x86/kvm/svm.c linux-3.0.3/arch/x86/kvm/svm.c
15983 --- linux-3.0.3/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15984 +++ linux-3.0.3/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
15985 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
15986 int cpu = raw_smp_processor_id();
15987
15988 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
15989 +
15990 + pax_open_kernel();
15991 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
15992 + pax_close_kernel();
15993 +
15994 load_TR_desc();
15995 }
15996
15997 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
15998 #endif
15999 #endif
16000
16001 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16002 + __set_fs(current_thread_info()->addr_limit);
16003 +#endif
16004 +
16005 reload_tss(vcpu);
16006
16007 local_irq_disable();
16008 diff -urNp linux-3.0.3/arch/x86/kvm/vmx.c linux-3.0.3/arch/x86/kvm/vmx.c
16009 --- linux-3.0.3/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16010 +++ linux-3.0.3/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16011 @@ -797,7 +797,11 @@ static void reload_tss(void)
16012 struct desc_struct *descs;
16013
16014 descs = (void *)gdt->address;
16015 +
16016 + pax_open_kernel();
16017 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16018 + pax_close_kernel();
16019 +
16020 load_TR_desc();
16021 }
16022
16023 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16024 if (!cpu_has_vmx_flexpriority())
16025 flexpriority_enabled = 0;
16026
16027 - if (!cpu_has_vmx_tpr_shadow())
16028 - kvm_x86_ops->update_cr8_intercept = NULL;
16029 + if (!cpu_has_vmx_tpr_shadow()) {
16030 + pax_open_kernel();
16031 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16032 + pax_close_kernel();
16033 + }
16034
16035 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16036 kvm_disable_largepages();
16037 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16038 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16039
16040 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16041 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16042 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16043 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16044 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16045 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16046 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16047 "jmp .Lkvm_vmx_return \n\t"
16048 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16049 ".Lkvm_vmx_return: "
16050 +
16051 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16052 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16053 + ".Lkvm_vmx_return2: "
16054 +#endif
16055 +
16056 /* Save guest registers, load host registers, keep flags */
16057 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16058 "pop %0 \n\t"
16059 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16060 #endif
16061 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16062 [wordsize]"i"(sizeof(ulong))
16063 +
16064 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16065 + ,[cs]"i"(__KERNEL_CS)
16066 +#endif
16067 +
16068 : "cc", "memory"
16069 , R"ax", R"bx", R"di", R"si"
16070 #ifdef CONFIG_X86_64
16071 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16072
16073 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16074
16075 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16076 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16077 +
16078 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16079 + loadsegment(fs, __KERNEL_PERCPU);
16080 +#endif
16081 +
16082 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16083 + __set_fs(current_thread_info()->addr_limit);
16084 +#endif
16085 +
16086 vmx->launched = 1;
16087
16088 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16089 diff -urNp linux-3.0.3/arch/x86/kvm/x86.c linux-3.0.3/arch/x86/kvm/x86.c
16090 --- linux-3.0.3/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16091 +++ linux-3.0.3/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16092 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16093 if (n < msr_list.nmsrs)
16094 goto out;
16095 r = -EFAULT;
16096 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16097 + goto out;
16098 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16099 num_msrs_to_save * sizeof(u32)))
16100 goto out;
16101 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16102 struct kvm_cpuid2 *cpuid,
16103 struct kvm_cpuid_entry2 __user *entries)
16104 {
16105 - int r;
16106 + int r, i;
16107
16108 r = -E2BIG;
16109 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16110 goto out;
16111 r = -EFAULT;
16112 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16113 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16114 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16115 goto out;
16116 + for (i = 0; i < cpuid->nent; ++i) {
16117 + struct kvm_cpuid_entry2 cpuid_entry;
16118 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16119 + goto out;
16120 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16121 + }
16122 vcpu->arch.cpuid_nent = cpuid->nent;
16123 kvm_apic_set_version(vcpu);
16124 kvm_x86_ops->cpuid_update(vcpu);
16125 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16126 struct kvm_cpuid2 *cpuid,
16127 struct kvm_cpuid_entry2 __user *entries)
16128 {
16129 - int r;
16130 + int r, i;
16131
16132 r = -E2BIG;
16133 if (cpuid->nent < vcpu->arch.cpuid_nent)
16134 goto out;
16135 r = -EFAULT;
16136 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16137 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16138 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16139 goto out;
16140 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16141 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16142 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16143 + goto out;
16144 + }
16145 return 0;
16146
16147 out:
16148 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16149 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16150 struct kvm_interrupt *irq)
16151 {
16152 - if (irq->irq < 0 || irq->irq >= 256)
16153 + if (irq->irq >= 256)
16154 return -EINVAL;
16155 if (irqchip_in_kernel(vcpu->kvm))
16156 return -ENXIO;
16157 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16158 }
16159 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16160
16161 -int kvm_arch_init(void *opaque)
16162 +int kvm_arch_init(const void *opaque)
16163 {
16164 int r;
16165 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16166 diff -urNp linux-3.0.3/arch/x86/lguest/boot.c linux-3.0.3/arch/x86/lguest/boot.c
16167 --- linux-3.0.3/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16168 +++ linux-3.0.3/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16169 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16170 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16171 * Launcher to reboot us.
16172 */
16173 -static void lguest_restart(char *reason)
16174 +static __noreturn void lguest_restart(char *reason)
16175 {
16176 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16177 + BUG();
16178 }
16179
16180 /*G:050
16181 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_32.c linux-3.0.3/arch/x86/lib/atomic64_32.c
16182 --- linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16183 +++ linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16184 @@ -8,18 +8,30 @@
16185
16186 long long atomic64_read_cx8(long long, const atomic64_t *v);
16187 EXPORT_SYMBOL(atomic64_read_cx8);
16188 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16189 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16190 long long atomic64_set_cx8(long long, const atomic64_t *v);
16191 EXPORT_SYMBOL(atomic64_set_cx8);
16192 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16193 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16194 long long atomic64_xchg_cx8(long long, unsigned high);
16195 EXPORT_SYMBOL(atomic64_xchg_cx8);
16196 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16197 EXPORT_SYMBOL(atomic64_add_return_cx8);
16198 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16199 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16200 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16201 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16202 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16203 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16204 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16205 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16206 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16207 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16208 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16209 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16210 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16211 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16212 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16213 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16214 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16215 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16216 #ifndef CONFIG_X86_CMPXCHG64
16217 long long atomic64_read_386(long long, const atomic64_t *v);
16218 EXPORT_SYMBOL(atomic64_read_386);
16219 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16220 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16221 long long atomic64_set_386(long long, const atomic64_t *v);
16222 EXPORT_SYMBOL(atomic64_set_386);
16223 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16224 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16225 long long atomic64_xchg_386(long long, unsigned high);
16226 EXPORT_SYMBOL(atomic64_xchg_386);
16227 long long atomic64_add_return_386(long long a, atomic64_t *v);
16228 EXPORT_SYMBOL(atomic64_add_return_386);
16229 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16230 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16231 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16232 EXPORT_SYMBOL(atomic64_sub_return_386);
16233 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16234 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16235 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16236 EXPORT_SYMBOL(atomic64_inc_return_386);
16237 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16238 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16239 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16240 EXPORT_SYMBOL(atomic64_dec_return_386);
16241 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16242 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16243 long long atomic64_add_386(long long a, atomic64_t *v);
16244 EXPORT_SYMBOL(atomic64_add_386);
16245 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16246 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16247 long long atomic64_sub_386(long long a, atomic64_t *v);
16248 EXPORT_SYMBOL(atomic64_sub_386);
16249 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16250 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16251 long long atomic64_inc_386(long long a, atomic64_t *v);
16252 EXPORT_SYMBOL(atomic64_inc_386);
16253 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16254 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16255 long long atomic64_dec_386(long long a, atomic64_t *v);
16256 EXPORT_SYMBOL(atomic64_dec_386);
16257 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16258 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16259 long long atomic64_dec_if_positive_386(atomic64_t *v);
16260 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16261 int atomic64_inc_not_zero_386(atomic64_t *v);
16262 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_386_32.S linux-3.0.3/arch/x86/lib/atomic64_386_32.S
16263 --- linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16264 +++ linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16265 @@ -48,6 +48,10 @@ BEGIN(read)
16266 movl (v), %eax
16267 movl 4(v), %edx
16268 RET_ENDP
16269 +BEGIN(read_unchecked)
16270 + movl (v), %eax
16271 + movl 4(v), %edx
16272 +RET_ENDP
16273 #undef v
16274
16275 #define v %esi
16276 @@ -55,6 +59,10 @@ BEGIN(set)
16277 movl %ebx, (v)
16278 movl %ecx, 4(v)
16279 RET_ENDP
16280 +BEGIN(set_unchecked)
16281 + movl %ebx, (v)
16282 + movl %ecx, 4(v)
16283 +RET_ENDP
16284 #undef v
16285
16286 #define v %esi
16287 @@ -70,6 +78,20 @@ RET_ENDP
16288 BEGIN(add)
16289 addl %eax, (v)
16290 adcl %edx, 4(v)
16291 +
16292 +#ifdef CONFIG_PAX_REFCOUNT
16293 + jno 0f
16294 + subl %eax, (v)
16295 + sbbl %edx, 4(v)
16296 + int $4
16297 +0:
16298 + _ASM_EXTABLE(0b, 0b)
16299 +#endif
16300 +
16301 +RET_ENDP
16302 +BEGIN(add_unchecked)
16303 + addl %eax, (v)
16304 + adcl %edx, 4(v)
16305 RET_ENDP
16306 #undef v
16307
16308 @@ -77,6 +99,24 @@ RET_ENDP
16309 BEGIN(add_return)
16310 addl (v), %eax
16311 adcl 4(v), %edx
16312 +
16313 +#ifdef CONFIG_PAX_REFCOUNT
16314 + into
16315 +1234:
16316 + _ASM_EXTABLE(1234b, 2f)
16317 +#endif
16318 +
16319 + movl %eax, (v)
16320 + movl %edx, 4(v)
16321 +
16322 +#ifdef CONFIG_PAX_REFCOUNT
16323 +2:
16324 +#endif
16325 +
16326 +RET_ENDP
16327 +BEGIN(add_return_unchecked)
16328 + addl (v), %eax
16329 + adcl 4(v), %edx
16330 movl %eax, (v)
16331 movl %edx, 4(v)
16332 RET_ENDP
16333 @@ -86,6 +126,20 @@ RET_ENDP
16334 BEGIN(sub)
16335 subl %eax, (v)
16336 sbbl %edx, 4(v)
16337 +
16338 +#ifdef CONFIG_PAX_REFCOUNT
16339 + jno 0f
16340 + addl %eax, (v)
16341 + adcl %edx, 4(v)
16342 + int $4
16343 +0:
16344 + _ASM_EXTABLE(0b, 0b)
16345 +#endif
16346 +
16347 +RET_ENDP
16348 +BEGIN(sub_unchecked)
16349 + subl %eax, (v)
16350 + sbbl %edx, 4(v)
16351 RET_ENDP
16352 #undef v
16353
16354 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16355 sbbl $0, %edx
16356 addl (v), %eax
16357 adcl 4(v), %edx
16358 +
16359 +#ifdef CONFIG_PAX_REFCOUNT
16360 + into
16361 +1234:
16362 + _ASM_EXTABLE(1234b, 2f)
16363 +#endif
16364 +
16365 + movl %eax, (v)
16366 + movl %edx, 4(v)
16367 +
16368 +#ifdef CONFIG_PAX_REFCOUNT
16369 +2:
16370 +#endif
16371 +
16372 +RET_ENDP
16373 +BEGIN(sub_return_unchecked)
16374 + negl %edx
16375 + negl %eax
16376 + sbbl $0, %edx
16377 + addl (v), %eax
16378 + adcl 4(v), %edx
16379 movl %eax, (v)
16380 movl %edx, 4(v)
16381 RET_ENDP
16382 @@ -105,6 +180,20 @@ RET_ENDP
16383 BEGIN(inc)
16384 addl $1, (v)
16385 adcl $0, 4(v)
16386 +
16387 +#ifdef CONFIG_PAX_REFCOUNT
16388 + jno 0f
16389 + subl $1, (v)
16390 + sbbl $0, 4(v)
16391 + int $4
16392 +0:
16393 + _ASM_EXTABLE(0b, 0b)
16394 +#endif
16395 +
16396 +RET_ENDP
16397 +BEGIN(inc_unchecked)
16398 + addl $1, (v)
16399 + adcl $0, 4(v)
16400 RET_ENDP
16401 #undef v
16402
16403 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16404 movl 4(v), %edx
16405 addl $1, %eax
16406 adcl $0, %edx
16407 +
16408 +#ifdef CONFIG_PAX_REFCOUNT
16409 + into
16410 +1234:
16411 + _ASM_EXTABLE(1234b, 2f)
16412 +#endif
16413 +
16414 + movl %eax, (v)
16415 + movl %edx, 4(v)
16416 +
16417 +#ifdef CONFIG_PAX_REFCOUNT
16418 +2:
16419 +#endif
16420 +
16421 +RET_ENDP
16422 +BEGIN(inc_return_unchecked)
16423 + movl (v), %eax
16424 + movl 4(v), %edx
16425 + addl $1, %eax
16426 + adcl $0, %edx
16427 movl %eax, (v)
16428 movl %edx, 4(v)
16429 RET_ENDP
16430 @@ -123,6 +232,20 @@ RET_ENDP
16431 BEGIN(dec)
16432 subl $1, (v)
16433 sbbl $0, 4(v)
16434 +
16435 +#ifdef CONFIG_PAX_REFCOUNT
16436 + jno 0f
16437 + addl $1, (v)
16438 + adcl $0, 4(v)
16439 + int $4
16440 +0:
16441 + _ASM_EXTABLE(0b, 0b)
16442 +#endif
16443 +
16444 +RET_ENDP
16445 +BEGIN(dec_unchecked)
16446 + subl $1, (v)
16447 + sbbl $0, 4(v)
16448 RET_ENDP
16449 #undef v
16450
16451 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16452 movl 4(v), %edx
16453 subl $1, %eax
16454 sbbl $0, %edx
16455 +
16456 +#ifdef CONFIG_PAX_REFCOUNT
16457 + into
16458 +1234:
16459 + _ASM_EXTABLE(1234b, 2f)
16460 +#endif
16461 +
16462 + movl %eax, (v)
16463 + movl %edx, 4(v)
16464 +
16465 +#ifdef CONFIG_PAX_REFCOUNT
16466 +2:
16467 +#endif
16468 +
16469 +RET_ENDP
16470 +BEGIN(dec_return_unchecked)
16471 + movl (v), %eax
16472 + movl 4(v), %edx
16473 + subl $1, %eax
16474 + sbbl $0, %edx
16475 movl %eax, (v)
16476 movl %edx, 4(v)
16477 RET_ENDP
16478 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16479 adcl %edx, %edi
16480 addl (v), %eax
16481 adcl 4(v), %edx
16482 +
16483 +#ifdef CONFIG_PAX_REFCOUNT
16484 + into
16485 +1234:
16486 + _ASM_EXTABLE(1234b, 2f)
16487 +#endif
16488 +
16489 cmpl %eax, %esi
16490 je 3f
16491 1:
16492 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16493 1:
16494 addl $1, %eax
16495 adcl $0, %edx
16496 +
16497 +#ifdef CONFIG_PAX_REFCOUNT
16498 + into
16499 +1234:
16500 + _ASM_EXTABLE(1234b, 2f)
16501 +#endif
16502 +
16503 movl %eax, (v)
16504 movl %edx, 4(v)
16505 movl $1, %eax
16506 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16507 movl 4(v), %edx
16508 subl $1, %eax
16509 sbbl $0, %edx
16510 +
16511 +#ifdef CONFIG_PAX_REFCOUNT
16512 + into
16513 +1234:
16514 + _ASM_EXTABLE(1234b, 1f)
16515 +#endif
16516 +
16517 js 1f
16518 movl %eax, (v)
16519 movl %edx, 4(v)
16520 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S
16521 --- linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16522 +++ linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16523 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16524 CFI_ENDPROC
16525 ENDPROC(atomic64_read_cx8)
16526
16527 +ENTRY(atomic64_read_unchecked_cx8)
16528 + CFI_STARTPROC
16529 +
16530 + read64 %ecx
16531 + ret
16532 + CFI_ENDPROC
16533 +ENDPROC(atomic64_read_unchecked_cx8)
16534 +
16535 ENTRY(atomic64_set_cx8)
16536 CFI_STARTPROC
16537
16538 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16539 CFI_ENDPROC
16540 ENDPROC(atomic64_set_cx8)
16541
16542 +ENTRY(atomic64_set_unchecked_cx8)
16543 + CFI_STARTPROC
16544 +
16545 +1:
16546 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16547 + * are atomic on 586 and newer */
16548 + cmpxchg8b (%esi)
16549 + jne 1b
16550 +
16551 + ret
16552 + CFI_ENDPROC
16553 +ENDPROC(atomic64_set_unchecked_cx8)
16554 +
16555 ENTRY(atomic64_xchg_cx8)
16556 CFI_STARTPROC
16557
16558 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16559 CFI_ENDPROC
16560 ENDPROC(atomic64_xchg_cx8)
16561
16562 -.macro addsub_return func ins insc
16563 -ENTRY(atomic64_\func\()_return_cx8)
16564 +.macro addsub_return func ins insc unchecked=""
16565 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16566 CFI_STARTPROC
16567 SAVE ebp
16568 SAVE ebx
16569 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16570 movl %edx, %ecx
16571 \ins\()l %esi, %ebx
16572 \insc\()l %edi, %ecx
16573 +
16574 +.ifb \unchecked
16575 +#ifdef CONFIG_PAX_REFCOUNT
16576 + into
16577 +2:
16578 + _ASM_EXTABLE(2b, 3f)
16579 +#endif
16580 +.endif
16581 +
16582 LOCK_PREFIX
16583 cmpxchg8b (%ebp)
16584 jne 1b
16585 -
16586 -10:
16587 movl %ebx, %eax
16588 movl %ecx, %edx
16589 +
16590 +.ifb \unchecked
16591 +#ifdef CONFIG_PAX_REFCOUNT
16592 +3:
16593 +#endif
16594 +.endif
16595 +
16596 RESTORE edi
16597 RESTORE esi
16598 RESTORE ebx
16599 RESTORE ebp
16600 ret
16601 CFI_ENDPROC
16602 -ENDPROC(atomic64_\func\()_return_cx8)
16603 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16604 .endm
16605
16606 addsub_return add add adc
16607 addsub_return sub sub sbb
16608 +addsub_return add add adc _unchecked
16609 +addsub_return sub sub sbb _unchecked
16610
16611 -.macro incdec_return func ins insc
16612 -ENTRY(atomic64_\func\()_return_cx8)
16613 +.macro incdec_return func ins insc unchecked
16614 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16615 CFI_STARTPROC
16616 SAVE ebx
16617
16618 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16619 movl %edx, %ecx
16620 \ins\()l $1, %ebx
16621 \insc\()l $0, %ecx
16622 +
16623 +.ifb \unchecked
16624 +#ifdef CONFIG_PAX_REFCOUNT
16625 + into
16626 +2:
16627 + _ASM_EXTABLE(2b, 3f)
16628 +#endif
16629 +.endif
16630 +
16631 LOCK_PREFIX
16632 cmpxchg8b (%esi)
16633 jne 1b
16634
16635 -10:
16636 movl %ebx, %eax
16637 movl %ecx, %edx
16638 +
16639 +.ifb \unchecked
16640 +#ifdef CONFIG_PAX_REFCOUNT
16641 +3:
16642 +#endif
16643 +.endif
16644 +
16645 RESTORE ebx
16646 ret
16647 CFI_ENDPROC
16648 -ENDPROC(atomic64_\func\()_return_cx8)
16649 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16650 .endm
16651
16652 incdec_return inc add adc
16653 incdec_return dec sub sbb
16654 +incdec_return inc add adc _unchecked
16655 +incdec_return dec sub sbb _unchecked
16656
16657 ENTRY(atomic64_dec_if_positive_cx8)
16658 CFI_STARTPROC
16659 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16660 movl %edx, %ecx
16661 subl $1, %ebx
16662 sbb $0, %ecx
16663 +
16664 +#ifdef CONFIG_PAX_REFCOUNT
16665 + into
16666 +1234:
16667 + _ASM_EXTABLE(1234b, 2f)
16668 +#endif
16669 +
16670 js 2f
16671 LOCK_PREFIX
16672 cmpxchg8b (%esi)
16673 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16674 movl %edx, %ecx
16675 addl %esi, %ebx
16676 adcl %edi, %ecx
16677 +
16678 +#ifdef CONFIG_PAX_REFCOUNT
16679 + into
16680 +1234:
16681 + _ASM_EXTABLE(1234b, 3f)
16682 +#endif
16683 +
16684 LOCK_PREFIX
16685 cmpxchg8b (%ebp)
16686 jne 1b
16687 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16688 movl %edx, %ecx
16689 addl $1, %ebx
16690 adcl $0, %ecx
16691 +
16692 +#ifdef CONFIG_PAX_REFCOUNT
16693 + into
16694 +1234:
16695 + _ASM_EXTABLE(1234b, 3f)
16696 +#endif
16697 +
16698 LOCK_PREFIX
16699 cmpxchg8b (%esi)
16700 jne 1b
16701 diff -urNp linux-3.0.3/arch/x86/lib/checksum_32.S linux-3.0.3/arch/x86/lib/checksum_32.S
16702 --- linux-3.0.3/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16703 +++ linux-3.0.3/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16704 @@ -28,7 +28,8 @@
16705 #include <linux/linkage.h>
16706 #include <asm/dwarf2.h>
16707 #include <asm/errno.h>
16708 -
16709 +#include <asm/segment.h>
16710 +
16711 /*
16712 * computes a partial checksum, e.g. for TCP/UDP fragments
16713 */
16714 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16715
16716 #define ARGBASE 16
16717 #define FP 12
16718 -
16719 -ENTRY(csum_partial_copy_generic)
16720 +
16721 +ENTRY(csum_partial_copy_generic_to_user)
16722 CFI_STARTPROC
16723 +
16724 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16725 + pushl_cfi %gs
16726 + popl_cfi %es
16727 + jmp csum_partial_copy_generic
16728 +#endif
16729 +
16730 +ENTRY(csum_partial_copy_generic_from_user)
16731 +
16732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16733 + pushl_cfi %gs
16734 + popl_cfi %ds
16735 +#endif
16736 +
16737 +ENTRY(csum_partial_copy_generic)
16738 subl $4,%esp
16739 CFI_ADJUST_CFA_OFFSET 4
16740 pushl_cfi %edi
16741 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16742 jmp 4f
16743 SRC(1: movw (%esi), %bx )
16744 addl $2, %esi
16745 -DST( movw %bx, (%edi) )
16746 +DST( movw %bx, %es:(%edi) )
16747 addl $2, %edi
16748 addw %bx, %ax
16749 adcl $0, %eax
16750 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16751 SRC(1: movl (%esi), %ebx )
16752 SRC( movl 4(%esi), %edx )
16753 adcl %ebx, %eax
16754 -DST( movl %ebx, (%edi) )
16755 +DST( movl %ebx, %es:(%edi) )
16756 adcl %edx, %eax
16757 -DST( movl %edx, 4(%edi) )
16758 +DST( movl %edx, %es:4(%edi) )
16759
16760 SRC( movl 8(%esi), %ebx )
16761 SRC( movl 12(%esi), %edx )
16762 adcl %ebx, %eax
16763 -DST( movl %ebx, 8(%edi) )
16764 +DST( movl %ebx, %es:8(%edi) )
16765 adcl %edx, %eax
16766 -DST( movl %edx, 12(%edi) )
16767 +DST( movl %edx, %es:12(%edi) )
16768
16769 SRC( movl 16(%esi), %ebx )
16770 SRC( movl 20(%esi), %edx )
16771 adcl %ebx, %eax
16772 -DST( movl %ebx, 16(%edi) )
16773 +DST( movl %ebx, %es:16(%edi) )
16774 adcl %edx, %eax
16775 -DST( movl %edx, 20(%edi) )
16776 +DST( movl %edx, %es:20(%edi) )
16777
16778 SRC( movl 24(%esi), %ebx )
16779 SRC( movl 28(%esi), %edx )
16780 adcl %ebx, %eax
16781 -DST( movl %ebx, 24(%edi) )
16782 +DST( movl %ebx, %es:24(%edi) )
16783 adcl %edx, %eax
16784 -DST( movl %edx, 28(%edi) )
16785 +DST( movl %edx, %es:28(%edi) )
16786
16787 lea 32(%esi), %esi
16788 lea 32(%edi), %edi
16789 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16790 shrl $2, %edx # This clears CF
16791 SRC(3: movl (%esi), %ebx )
16792 adcl %ebx, %eax
16793 -DST( movl %ebx, (%edi) )
16794 +DST( movl %ebx, %es:(%edi) )
16795 lea 4(%esi), %esi
16796 lea 4(%edi), %edi
16797 dec %edx
16798 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16799 jb 5f
16800 SRC( movw (%esi), %cx )
16801 leal 2(%esi), %esi
16802 -DST( movw %cx, (%edi) )
16803 +DST( movw %cx, %es:(%edi) )
16804 leal 2(%edi), %edi
16805 je 6f
16806 shll $16,%ecx
16807 SRC(5: movb (%esi), %cl )
16808 -DST( movb %cl, (%edi) )
16809 +DST( movb %cl, %es:(%edi) )
16810 6: addl %ecx, %eax
16811 adcl $0, %eax
16812 7:
16813 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16814
16815 6001:
16816 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16817 - movl $-EFAULT, (%ebx)
16818 + movl $-EFAULT, %ss:(%ebx)
16819
16820 # zero the complete destination - computing the rest
16821 # is too much work
16822 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16823
16824 6002:
16825 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16826 - movl $-EFAULT,(%ebx)
16827 + movl $-EFAULT,%ss:(%ebx)
16828 jmp 5000b
16829
16830 .previous
16831
16832 + pushl_cfi %ss
16833 + popl_cfi %ds
16834 + pushl_cfi %ss
16835 + popl_cfi %es
16836 popl_cfi %ebx
16837 CFI_RESTORE ebx
16838 popl_cfi %esi
16839 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16840 popl_cfi %ecx # equivalent to addl $4,%esp
16841 ret
16842 CFI_ENDPROC
16843 -ENDPROC(csum_partial_copy_generic)
16844 +ENDPROC(csum_partial_copy_generic_to_user)
16845
16846 #else
16847
16848 /* Version for PentiumII/PPro */
16849
16850 #define ROUND1(x) \
16851 + nop; nop; nop; \
16852 SRC(movl x(%esi), %ebx ) ; \
16853 addl %ebx, %eax ; \
16854 - DST(movl %ebx, x(%edi) ) ;
16855 + DST(movl %ebx, %es:x(%edi)) ;
16856
16857 #define ROUND(x) \
16858 + nop; nop; nop; \
16859 SRC(movl x(%esi), %ebx ) ; \
16860 adcl %ebx, %eax ; \
16861 - DST(movl %ebx, x(%edi) ) ;
16862 + DST(movl %ebx, %es:x(%edi)) ;
16863
16864 #define ARGBASE 12
16865 -
16866 -ENTRY(csum_partial_copy_generic)
16867 +
16868 +ENTRY(csum_partial_copy_generic_to_user)
16869 CFI_STARTPROC
16870 +
16871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16872 + pushl_cfi %gs
16873 + popl_cfi %es
16874 + jmp csum_partial_copy_generic
16875 +#endif
16876 +
16877 +ENTRY(csum_partial_copy_generic_from_user)
16878 +
16879 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16880 + pushl_cfi %gs
16881 + popl_cfi %ds
16882 +#endif
16883 +
16884 +ENTRY(csum_partial_copy_generic)
16885 pushl_cfi %ebx
16886 CFI_REL_OFFSET ebx, 0
16887 pushl_cfi %edi
16888 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16889 subl %ebx, %edi
16890 lea -1(%esi),%edx
16891 andl $-32,%edx
16892 - lea 3f(%ebx,%ebx), %ebx
16893 + lea 3f(%ebx,%ebx,2), %ebx
16894 testl %esi, %esi
16895 jmp *%ebx
16896 1: addl $64,%esi
16897 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16898 jb 5f
16899 SRC( movw (%esi), %dx )
16900 leal 2(%esi), %esi
16901 -DST( movw %dx, (%edi) )
16902 +DST( movw %dx, %es:(%edi) )
16903 leal 2(%edi), %edi
16904 je 6f
16905 shll $16,%edx
16906 5:
16907 SRC( movb (%esi), %dl )
16908 -DST( movb %dl, (%edi) )
16909 +DST( movb %dl, %es:(%edi) )
16910 6: addl %edx, %eax
16911 adcl $0, %eax
16912 7:
16913 .section .fixup, "ax"
16914 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16915 - movl $-EFAULT, (%ebx)
16916 + movl $-EFAULT, %ss:(%ebx)
16917 # zero the complete destination (computing the rest is too much work)
16918 movl ARGBASE+8(%esp),%edi # dst
16919 movl ARGBASE+12(%esp),%ecx # len
16920 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16921 rep; stosb
16922 jmp 7b
16923 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16924 - movl $-EFAULT, (%ebx)
16925 + movl $-EFAULT, %ss:(%ebx)
16926 jmp 7b
16927 .previous
16928
16929 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16930 + pushl_cfi %ss
16931 + popl_cfi %ds
16932 + pushl_cfi %ss
16933 + popl_cfi %es
16934 +#endif
16935 +
16936 popl_cfi %esi
16937 CFI_RESTORE esi
16938 popl_cfi %edi
16939 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16940 CFI_RESTORE ebx
16941 ret
16942 CFI_ENDPROC
16943 -ENDPROC(csum_partial_copy_generic)
16944 +ENDPROC(csum_partial_copy_generic_to_user)
16945
16946 #undef ROUND
16947 #undef ROUND1
16948 diff -urNp linux-3.0.3/arch/x86/lib/clear_page_64.S linux-3.0.3/arch/x86/lib/clear_page_64.S
16949 --- linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16950 +++ linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16951 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16952
16953 #include <asm/cpufeature.h>
16954
16955 - .section .altinstr_replacement,"ax"
16956 + .section .altinstr_replacement,"a"
16957 1: .byte 0xeb /* jmp <disp8> */
16958 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16959 2: .byte 0xeb /* jmp <disp8> */
16960 diff -urNp linux-3.0.3/arch/x86/lib/copy_page_64.S linux-3.0.3/arch/x86/lib/copy_page_64.S
16961 --- linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16962 +++ linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16963 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16964
16965 #include <asm/cpufeature.h>
16966
16967 - .section .altinstr_replacement,"ax"
16968 + .section .altinstr_replacement,"a"
16969 1: .byte 0xeb /* jmp <disp8> */
16970 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16971 2:
16972 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_64.S linux-3.0.3/arch/x86/lib/copy_user_64.S
16973 --- linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16974 +++ linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16975 @@ -16,6 +16,7 @@
16976 #include <asm/thread_info.h>
16977 #include <asm/cpufeature.h>
16978 #include <asm/alternative-asm.h>
16979 +#include <asm/pgtable.h>
16980
16981 /*
16982 * By placing feature2 after feature1 in altinstructions section, we logically
16983 @@ -29,7 +30,7 @@
16984 .byte 0xe9 /* 32bit jump */
16985 .long \orig-1f /* by default jump to orig */
16986 1:
16987 - .section .altinstr_replacement,"ax"
16988 + .section .altinstr_replacement,"a"
16989 2: .byte 0xe9 /* near jump with 32bit immediate */
16990 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
16991 3: .byte 0xe9 /* near jump with 32bit immediate */
16992 @@ -71,41 +72,13 @@
16993 #endif
16994 .endm
16995
16996 -/* Standard copy_to_user with segment limit checking */
16997 -ENTRY(_copy_to_user)
16998 - CFI_STARTPROC
16999 - GET_THREAD_INFO(%rax)
17000 - movq %rdi,%rcx
17001 - addq %rdx,%rcx
17002 - jc bad_to_user
17003 - cmpq TI_addr_limit(%rax),%rcx
17004 - ja bad_to_user
17005 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17006 - copy_user_generic_unrolled,copy_user_generic_string, \
17007 - copy_user_enhanced_fast_string
17008 - CFI_ENDPROC
17009 -ENDPROC(_copy_to_user)
17010 -
17011 -/* Standard copy_from_user with segment limit checking */
17012 -ENTRY(_copy_from_user)
17013 - CFI_STARTPROC
17014 - GET_THREAD_INFO(%rax)
17015 - movq %rsi,%rcx
17016 - addq %rdx,%rcx
17017 - jc bad_from_user
17018 - cmpq TI_addr_limit(%rax),%rcx
17019 - ja bad_from_user
17020 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17021 - copy_user_generic_unrolled,copy_user_generic_string, \
17022 - copy_user_enhanced_fast_string
17023 - CFI_ENDPROC
17024 -ENDPROC(_copy_from_user)
17025 -
17026 .section .fixup,"ax"
17027 /* must zero dest */
17028 ENTRY(bad_from_user)
17029 bad_from_user:
17030 CFI_STARTPROC
17031 + testl %edx,%edx
17032 + js bad_to_user
17033 movl %edx,%ecx
17034 xorl %eax,%eax
17035 rep
17036 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S
17037 --- linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17038 +++ linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17039 @@ -14,6 +14,7 @@
17040 #include <asm/current.h>
17041 #include <asm/asm-offsets.h>
17042 #include <asm/thread_info.h>
17043 +#include <asm/pgtable.h>
17044
17045 .macro ALIGN_DESTINATION
17046 #ifdef FIX_ALIGNMENT
17047 @@ -50,6 +51,15 @@
17048 */
17049 ENTRY(__copy_user_nocache)
17050 CFI_STARTPROC
17051 +
17052 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17053 + mov $PAX_USER_SHADOW_BASE,%rcx
17054 + cmp %rcx,%rsi
17055 + jae 1f
17056 + add %rcx,%rsi
17057 +1:
17058 +#endif
17059 +
17060 cmpl $8,%edx
17061 jb 20f /* less then 8 bytes, go to byte copy loop */
17062 ALIGN_DESTINATION
17063 diff -urNp linux-3.0.3/arch/x86/lib/csum-wrappers_64.c linux-3.0.3/arch/x86/lib/csum-wrappers_64.c
17064 --- linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17065 +++ linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17066 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17067 len -= 2;
17068 }
17069 }
17070 +
17071 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17072 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17073 + src += PAX_USER_SHADOW_BASE;
17074 +#endif
17075 +
17076 isum = csum_partial_copy_generic((__force const void *)src,
17077 dst, len, isum, errp, NULL);
17078 if (unlikely(*errp))
17079 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17080 }
17081
17082 *errp = 0;
17083 +
17084 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17085 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17086 + dst += PAX_USER_SHADOW_BASE;
17087 +#endif
17088 +
17089 return csum_partial_copy_generic(src, (void __force *)dst,
17090 len, isum, NULL, errp);
17091 }
17092 diff -urNp linux-3.0.3/arch/x86/lib/getuser.S linux-3.0.3/arch/x86/lib/getuser.S
17093 --- linux-3.0.3/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17094 +++ linux-3.0.3/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17095 @@ -33,14 +33,35 @@
17096 #include <asm/asm-offsets.h>
17097 #include <asm/thread_info.h>
17098 #include <asm/asm.h>
17099 +#include <asm/segment.h>
17100 +#include <asm/pgtable.h>
17101 +
17102 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17103 +#define __copyuser_seg gs;
17104 +#else
17105 +#define __copyuser_seg
17106 +#endif
17107
17108 .text
17109 ENTRY(__get_user_1)
17110 CFI_STARTPROC
17111 +
17112 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17113 GET_THREAD_INFO(%_ASM_DX)
17114 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17115 jae bad_get_user
17116 -1: movzb (%_ASM_AX),%edx
17117 +
17118 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17119 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17120 + cmp %_ASM_DX,%_ASM_AX
17121 + jae 1234f
17122 + add %_ASM_DX,%_ASM_AX
17123 +1234:
17124 +#endif
17125 +
17126 +#endif
17127 +
17128 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17129 xor %eax,%eax
17130 ret
17131 CFI_ENDPROC
17132 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17133 ENTRY(__get_user_2)
17134 CFI_STARTPROC
17135 add $1,%_ASM_AX
17136 +
17137 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17138 jc bad_get_user
17139 GET_THREAD_INFO(%_ASM_DX)
17140 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17141 jae bad_get_user
17142 -2: movzwl -1(%_ASM_AX),%edx
17143 +
17144 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17145 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17146 + cmp %_ASM_DX,%_ASM_AX
17147 + jae 1234f
17148 + add %_ASM_DX,%_ASM_AX
17149 +1234:
17150 +#endif
17151 +
17152 +#endif
17153 +
17154 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17155 xor %eax,%eax
17156 ret
17157 CFI_ENDPROC
17158 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17159 ENTRY(__get_user_4)
17160 CFI_STARTPROC
17161 add $3,%_ASM_AX
17162 +
17163 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17164 jc bad_get_user
17165 GET_THREAD_INFO(%_ASM_DX)
17166 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17167 jae bad_get_user
17168 -3: mov -3(%_ASM_AX),%edx
17169 +
17170 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17171 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17172 + cmp %_ASM_DX,%_ASM_AX
17173 + jae 1234f
17174 + add %_ASM_DX,%_ASM_AX
17175 +1234:
17176 +#endif
17177 +
17178 +#endif
17179 +
17180 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17181 xor %eax,%eax
17182 ret
17183 CFI_ENDPROC
17184 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17185 GET_THREAD_INFO(%_ASM_DX)
17186 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17187 jae bad_get_user
17188 +
17189 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17190 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17191 + cmp %_ASM_DX,%_ASM_AX
17192 + jae 1234f
17193 + add %_ASM_DX,%_ASM_AX
17194 +1234:
17195 +#endif
17196 +
17197 4: movq -7(%_ASM_AX),%_ASM_DX
17198 xor %eax,%eax
17199 ret
17200 diff -urNp linux-3.0.3/arch/x86/lib/insn.c linux-3.0.3/arch/x86/lib/insn.c
17201 --- linux-3.0.3/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17202 +++ linux-3.0.3/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17203 @@ -21,6 +21,11 @@
17204 #include <linux/string.h>
17205 #include <asm/inat.h>
17206 #include <asm/insn.h>
17207 +#ifdef __KERNEL__
17208 +#include <asm/pgtable_types.h>
17209 +#else
17210 +#define ktla_ktva(addr) addr
17211 +#endif
17212
17213 #define get_next(t, insn) \
17214 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17215 @@ -40,8 +45,8 @@
17216 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17217 {
17218 memset(insn, 0, sizeof(*insn));
17219 - insn->kaddr = kaddr;
17220 - insn->next_byte = kaddr;
17221 + insn->kaddr = ktla_ktva(kaddr);
17222 + insn->next_byte = ktla_ktva(kaddr);
17223 insn->x86_64 = x86_64 ? 1 : 0;
17224 insn->opnd_bytes = 4;
17225 if (x86_64)
17226 diff -urNp linux-3.0.3/arch/x86/lib/mmx_32.c linux-3.0.3/arch/x86/lib/mmx_32.c
17227 --- linux-3.0.3/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17228 +++ linux-3.0.3/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17229 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17230 {
17231 void *p;
17232 int i;
17233 + unsigned long cr0;
17234
17235 if (unlikely(in_interrupt()))
17236 return __memcpy(to, from, len);
17237 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17238 kernel_fpu_begin();
17239
17240 __asm__ __volatile__ (
17241 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17242 - " prefetch 64(%0)\n"
17243 - " prefetch 128(%0)\n"
17244 - " prefetch 192(%0)\n"
17245 - " prefetch 256(%0)\n"
17246 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17247 + " prefetch 64(%1)\n"
17248 + " prefetch 128(%1)\n"
17249 + " prefetch 192(%1)\n"
17250 + " prefetch 256(%1)\n"
17251 "2: \n"
17252 ".section .fixup, \"ax\"\n"
17253 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17254 + "3: \n"
17255 +
17256 +#ifdef CONFIG_PAX_KERNEXEC
17257 + " movl %%cr0, %0\n"
17258 + " movl %0, %%eax\n"
17259 + " andl $0xFFFEFFFF, %%eax\n"
17260 + " movl %%eax, %%cr0\n"
17261 +#endif
17262 +
17263 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17264 +
17265 +#ifdef CONFIG_PAX_KERNEXEC
17266 + " movl %0, %%cr0\n"
17267 +#endif
17268 +
17269 " jmp 2b\n"
17270 ".previous\n"
17271 _ASM_EXTABLE(1b, 3b)
17272 - : : "r" (from));
17273 + : "=&r" (cr0) : "r" (from) : "ax");
17274
17275 for ( ; i > 5; i--) {
17276 __asm__ __volatile__ (
17277 - "1: prefetch 320(%0)\n"
17278 - "2: movq (%0), %%mm0\n"
17279 - " movq 8(%0), %%mm1\n"
17280 - " movq 16(%0), %%mm2\n"
17281 - " movq 24(%0), %%mm3\n"
17282 - " movq %%mm0, (%1)\n"
17283 - " movq %%mm1, 8(%1)\n"
17284 - " movq %%mm2, 16(%1)\n"
17285 - " movq %%mm3, 24(%1)\n"
17286 - " movq 32(%0), %%mm0\n"
17287 - " movq 40(%0), %%mm1\n"
17288 - " movq 48(%0), %%mm2\n"
17289 - " movq 56(%0), %%mm3\n"
17290 - " movq %%mm0, 32(%1)\n"
17291 - " movq %%mm1, 40(%1)\n"
17292 - " movq %%mm2, 48(%1)\n"
17293 - " movq %%mm3, 56(%1)\n"
17294 + "1: prefetch 320(%1)\n"
17295 + "2: movq (%1), %%mm0\n"
17296 + " movq 8(%1), %%mm1\n"
17297 + " movq 16(%1), %%mm2\n"
17298 + " movq 24(%1), %%mm3\n"
17299 + " movq %%mm0, (%2)\n"
17300 + " movq %%mm1, 8(%2)\n"
17301 + " movq %%mm2, 16(%2)\n"
17302 + " movq %%mm3, 24(%2)\n"
17303 + " movq 32(%1), %%mm0\n"
17304 + " movq 40(%1), %%mm1\n"
17305 + " movq 48(%1), %%mm2\n"
17306 + " movq 56(%1), %%mm3\n"
17307 + " movq %%mm0, 32(%2)\n"
17308 + " movq %%mm1, 40(%2)\n"
17309 + " movq %%mm2, 48(%2)\n"
17310 + " movq %%mm3, 56(%2)\n"
17311 ".section .fixup, \"ax\"\n"
17312 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17313 + "3:\n"
17314 +
17315 +#ifdef CONFIG_PAX_KERNEXEC
17316 + " movl %%cr0, %0\n"
17317 + " movl %0, %%eax\n"
17318 + " andl $0xFFFEFFFF, %%eax\n"
17319 + " movl %%eax, %%cr0\n"
17320 +#endif
17321 +
17322 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17323 +
17324 +#ifdef CONFIG_PAX_KERNEXEC
17325 + " movl %0, %%cr0\n"
17326 +#endif
17327 +
17328 " jmp 2b\n"
17329 ".previous\n"
17330 _ASM_EXTABLE(1b, 3b)
17331 - : : "r" (from), "r" (to) : "memory");
17332 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17333
17334 from += 64;
17335 to += 64;
17336 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17337 static void fast_copy_page(void *to, void *from)
17338 {
17339 int i;
17340 + unsigned long cr0;
17341
17342 kernel_fpu_begin();
17343
17344 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17345 * but that is for later. -AV
17346 */
17347 __asm__ __volatile__(
17348 - "1: prefetch (%0)\n"
17349 - " prefetch 64(%0)\n"
17350 - " prefetch 128(%0)\n"
17351 - " prefetch 192(%0)\n"
17352 - " prefetch 256(%0)\n"
17353 + "1: prefetch (%1)\n"
17354 + " prefetch 64(%1)\n"
17355 + " prefetch 128(%1)\n"
17356 + " prefetch 192(%1)\n"
17357 + " prefetch 256(%1)\n"
17358 "2: \n"
17359 ".section .fixup, \"ax\"\n"
17360 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17361 + "3: \n"
17362 +
17363 +#ifdef CONFIG_PAX_KERNEXEC
17364 + " movl %%cr0, %0\n"
17365 + " movl %0, %%eax\n"
17366 + " andl $0xFFFEFFFF, %%eax\n"
17367 + " movl %%eax, %%cr0\n"
17368 +#endif
17369 +
17370 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17371 +
17372 +#ifdef CONFIG_PAX_KERNEXEC
17373 + " movl %0, %%cr0\n"
17374 +#endif
17375 +
17376 " jmp 2b\n"
17377 ".previous\n"
17378 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17379 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17380
17381 for (i = 0; i < (4096-320)/64; i++) {
17382 __asm__ __volatile__ (
17383 - "1: prefetch 320(%0)\n"
17384 - "2: movq (%0), %%mm0\n"
17385 - " movntq %%mm0, (%1)\n"
17386 - " movq 8(%0), %%mm1\n"
17387 - " movntq %%mm1, 8(%1)\n"
17388 - " movq 16(%0), %%mm2\n"
17389 - " movntq %%mm2, 16(%1)\n"
17390 - " movq 24(%0), %%mm3\n"
17391 - " movntq %%mm3, 24(%1)\n"
17392 - " movq 32(%0), %%mm4\n"
17393 - " movntq %%mm4, 32(%1)\n"
17394 - " movq 40(%0), %%mm5\n"
17395 - " movntq %%mm5, 40(%1)\n"
17396 - " movq 48(%0), %%mm6\n"
17397 - " movntq %%mm6, 48(%1)\n"
17398 - " movq 56(%0), %%mm7\n"
17399 - " movntq %%mm7, 56(%1)\n"
17400 + "1: prefetch 320(%1)\n"
17401 + "2: movq (%1), %%mm0\n"
17402 + " movntq %%mm0, (%2)\n"
17403 + " movq 8(%1), %%mm1\n"
17404 + " movntq %%mm1, 8(%2)\n"
17405 + " movq 16(%1), %%mm2\n"
17406 + " movntq %%mm2, 16(%2)\n"
17407 + " movq 24(%1), %%mm3\n"
17408 + " movntq %%mm3, 24(%2)\n"
17409 + " movq 32(%1), %%mm4\n"
17410 + " movntq %%mm4, 32(%2)\n"
17411 + " movq 40(%1), %%mm5\n"
17412 + " movntq %%mm5, 40(%2)\n"
17413 + " movq 48(%1), %%mm6\n"
17414 + " movntq %%mm6, 48(%2)\n"
17415 + " movq 56(%1), %%mm7\n"
17416 + " movntq %%mm7, 56(%2)\n"
17417 ".section .fixup, \"ax\"\n"
17418 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17419 + "3:\n"
17420 +
17421 +#ifdef CONFIG_PAX_KERNEXEC
17422 + " movl %%cr0, %0\n"
17423 + " movl %0, %%eax\n"
17424 + " andl $0xFFFEFFFF, %%eax\n"
17425 + " movl %%eax, %%cr0\n"
17426 +#endif
17427 +
17428 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17429 +
17430 +#ifdef CONFIG_PAX_KERNEXEC
17431 + " movl %0, %%cr0\n"
17432 +#endif
17433 +
17434 " jmp 2b\n"
17435 ".previous\n"
17436 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17437 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17438
17439 from += 64;
17440 to += 64;
17441 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17442 static void fast_copy_page(void *to, void *from)
17443 {
17444 int i;
17445 + unsigned long cr0;
17446
17447 kernel_fpu_begin();
17448
17449 __asm__ __volatile__ (
17450 - "1: prefetch (%0)\n"
17451 - " prefetch 64(%0)\n"
17452 - " prefetch 128(%0)\n"
17453 - " prefetch 192(%0)\n"
17454 - " prefetch 256(%0)\n"
17455 + "1: prefetch (%1)\n"
17456 + " prefetch 64(%1)\n"
17457 + " prefetch 128(%1)\n"
17458 + " prefetch 192(%1)\n"
17459 + " prefetch 256(%1)\n"
17460 "2: \n"
17461 ".section .fixup, \"ax\"\n"
17462 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17463 + "3: \n"
17464 +
17465 +#ifdef CONFIG_PAX_KERNEXEC
17466 + " movl %%cr0, %0\n"
17467 + " movl %0, %%eax\n"
17468 + " andl $0xFFFEFFFF, %%eax\n"
17469 + " movl %%eax, %%cr0\n"
17470 +#endif
17471 +
17472 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17473 +
17474 +#ifdef CONFIG_PAX_KERNEXEC
17475 + " movl %0, %%cr0\n"
17476 +#endif
17477 +
17478 " jmp 2b\n"
17479 ".previous\n"
17480 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17481 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17482
17483 for (i = 0; i < 4096/64; i++) {
17484 __asm__ __volatile__ (
17485 - "1: prefetch 320(%0)\n"
17486 - "2: movq (%0), %%mm0\n"
17487 - " movq 8(%0), %%mm1\n"
17488 - " movq 16(%0), %%mm2\n"
17489 - " movq 24(%0), %%mm3\n"
17490 - " movq %%mm0, (%1)\n"
17491 - " movq %%mm1, 8(%1)\n"
17492 - " movq %%mm2, 16(%1)\n"
17493 - " movq %%mm3, 24(%1)\n"
17494 - " movq 32(%0), %%mm0\n"
17495 - " movq 40(%0), %%mm1\n"
17496 - " movq 48(%0), %%mm2\n"
17497 - " movq 56(%0), %%mm3\n"
17498 - " movq %%mm0, 32(%1)\n"
17499 - " movq %%mm1, 40(%1)\n"
17500 - " movq %%mm2, 48(%1)\n"
17501 - " movq %%mm3, 56(%1)\n"
17502 + "1: prefetch 320(%1)\n"
17503 + "2: movq (%1), %%mm0\n"
17504 + " movq 8(%1), %%mm1\n"
17505 + " movq 16(%1), %%mm2\n"
17506 + " movq 24(%1), %%mm3\n"
17507 + " movq %%mm0, (%2)\n"
17508 + " movq %%mm1, 8(%2)\n"
17509 + " movq %%mm2, 16(%2)\n"
17510 + " movq %%mm3, 24(%2)\n"
17511 + " movq 32(%1), %%mm0\n"
17512 + " movq 40(%1), %%mm1\n"
17513 + " movq 48(%1), %%mm2\n"
17514 + " movq 56(%1), %%mm3\n"
17515 + " movq %%mm0, 32(%2)\n"
17516 + " movq %%mm1, 40(%2)\n"
17517 + " movq %%mm2, 48(%2)\n"
17518 + " movq %%mm3, 56(%2)\n"
17519 ".section .fixup, \"ax\"\n"
17520 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17521 + "3:\n"
17522 +
17523 +#ifdef CONFIG_PAX_KERNEXEC
17524 + " movl %%cr0, %0\n"
17525 + " movl %0, %%eax\n"
17526 + " andl $0xFFFEFFFF, %%eax\n"
17527 + " movl %%eax, %%cr0\n"
17528 +#endif
17529 +
17530 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17531 +
17532 +#ifdef CONFIG_PAX_KERNEXEC
17533 + " movl %0, %%cr0\n"
17534 +#endif
17535 +
17536 " jmp 2b\n"
17537 ".previous\n"
17538 _ASM_EXTABLE(1b, 3b)
17539 - : : "r" (from), "r" (to) : "memory");
17540 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17541
17542 from += 64;
17543 to += 64;
17544 diff -urNp linux-3.0.3/arch/x86/lib/putuser.S linux-3.0.3/arch/x86/lib/putuser.S
17545 --- linux-3.0.3/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17546 +++ linux-3.0.3/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17547 @@ -15,7 +15,8 @@
17548 #include <asm/thread_info.h>
17549 #include <asm/errno.h>
17550 #include <asm/asm.h>
17551 -
17552 +#include <asm/segment.h>
17553 +#include <asm/pgtable.h>
17554
17555 /*
17556 * __put_user_X
17557 @@ -29,52 +30,119 @@
17558 * as they get called from within inline assembly.
17559 */
17560
17561 -#define ENTER CFI_STARTPROC ; \
17562 - GET_THREAD_INFO(%_ASM_BX)
17563 +#define ENTER CFI_STARTPROC
17564 #define EXIT ret ; \
17565 CFI_ENDPROC
17566
17567 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17568 +#define _DEST %_ASM_CX,%_ASM_BX
17569 +#else
17570 +#define _DEST %_ASM_CX
17571 +#endif
17572 +
17573 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17574 +#define __copyuser_seg gs;
17575 +#else
17576 +#define __copyuser_seg
17577 +#endif
17578 +
17579 .text
17580 ENTRY(__put_user_1)
17581 ENTER
17582 +
17583 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17584 + GET_THREAD_INFO(%_ASM_BX)
17585 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17586 jae bad_put_user
17587 -1: movb %al,(%_ASM_CX)
17588 +
17589 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17590 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17591 + cmp %_ASM_BX,%_ASM_CX
17592 + jb 1234f
17593 + xor %ebx,%ebx
17594 +1234:
17595 +#endif
17596 +
17597 +#endif
17598 +
17599 +1: __copyuser_seg movb %al,(_DEST)
17600 xor %eax,%eax
17601 EXIT
17602 ENDPROC(__put_user_1)
17603
17604 ENTRY(__put_user_2)
17605 ENTER
17606 +
17607 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17608 + GET_THREAD_INFO(%_ASM_BX)
17609 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17610 sub $1,%_ASM_BX
17611 cmp %_ASM_BX,%_ASM_CX
17612 jae bad_put_user
17613 -2: movw %ax,(%_ASM_CX)
17614 +
17615 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17616 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17617 + cmp %_ASM_BX,%_ASM_CX
17618 + jb 1234f
17619 + xor %ebx,%ebx
17620 +1234:
17621 +#endif
17622 +
17623 +#endif
17624 +
17625 +2: __copyuser_seg movw %ax,(_DEST)
17626 xor %eax,%eax
17627 EXIT
17628 ENDPROC(__put_user_2)
17629
17630 ENTRY(__put_user_4)
17631 ENTER
17632 +
17633 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17634 + GET_THREAD_INFO(%_ASM_BX)
17635 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17636 sub $3,%_ASM_BX
17637 cmp %_ASM_BX,%_ASM_CX
17638 jae bad_put_user
17639 -3: movl %eax,(%_ASM_CX)
17640 +
17641 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17642 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17643 + cmp %_ASM_BX,%_ASM_CX
17644 + jb 1234f
17645 + xor %ebx,%ebx
17646 +1234:
17647 +#endif
17648 +
17649 +#endif
17650 +
17651 +3: __copyuser_seg movl %eax,(_DEST)
17652 xor %eax,%eax
17653 EXIT
17654 ENDPROC(__put_user_4)
17655
17656 ENTRY(__put_user_8)
17657 ENTER
17658 +
17659 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17660 + GET_THREAD_INFO(%_ASM_BX)
17661 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17662 sub $7,%_ASM_BX
17663 cmp %_ASM_BX,%_ASM_CX
17664 jae bad_put_user
17665 -4: mov %_ASM_AX,(%_ASM_CX)
17666 +
17667 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17668 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17669 + cmp %_ASM_BX,%_ASM_CX
17670 + jb 1234f
17671 + xor %ebx,%ebx
17672 +1234:
17673 +#endif
17674 +
17675 +#endif
17676 +
17677 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17678 #ifdef CONFIG_X86_32
17679 -5: movl %edx,4(%_ASM_CX)
17680 +5: __copyuser_seg movl %edx,4(_DEST)
17681 #endif
17682 xor %eax,%eax
17683 EXIT
17684 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_32.c linux-3.0.3/arch/x86/lib/usercopy_32.c
17685 --- linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17686 +++ linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17687 @@ -43,7 +43,7 @@ do { \
17688 __asm__ __volatile__( \
17689 " testl %1,%1\n" \
17690 " jz 2f\n" \
17691 - "0: lodsb\n" \
17692 + "0: "__copyuser_seg"lodsb\n" \
17693 " stosb\n" \
17694 " testb %%al,%%al\n" \
17695 " jz 1f\n" \
17696 @@ -128,10 +128,12 @@ do { \
17697 int __d0; \
17698 might_fault(); \
17699 __asm__ __volatile__( \
17700 + __COPYUSER_SET_ES \
17701 "0: rep; stosl\n" \
17702 " movl %2,%0\n" \
17703 "1: rep; stosb\n" \
17704 "2:\n" \
17705 + __COPYUSER_RESTORE_ES \
17706 ".section .fixup,\"ax\"\n" \
17707 "3: lea 0(%2,%0,4),%0\n" \
17708 " jmp 2b\n" \
17709 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17710 might_fault();
17711
17712 __asm__ __volatile__(
17713 + __COPYUSER_SET_ES
17714 " testl %0, %0\n"
17715 " jz 3f\n"
17716 " andl %0,%%ecx\n"
17717 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17718 " subl %%ecx,%0\n"
17719 " addl %0,%%eax\n"
17720 "1:\n"
17721 + __COPYUSER_RESTORE_ES
17722 ".section .fixup,\"ax\"\n"
17723 "2: xorl %%eax,%%eax\n"
17724 " jmp 1b\n"
17725 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17726
17727 #ifdef CONFIG_X86_INTEL_USERCOPY
17728 static unsigned long
17729 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17730 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17731 {
17732 int d0, d1;
17733 __asm__ __volatile__(
17734 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17735 " .align 2,0x90\n"
17736 "3: movl 0(%4), %%eax\n"
17737 "4: movl 4(%4), %%edx\n"
17738 - "5: movl %%eax, 0(%3)\n"
17739 - "6: movl %%edx, 4(%3)\n"
17740 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17741 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17742 "7: movl 8(%4), %%eax\n"
17743 "8: movl 12(%4),%%edx\n"
17744 - "9: movl %%eax, 8(%3)\n"
17745 - "10: movl %%edx, 12(%3)\n"
17746 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17747 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17748 "11: movl 16(%4), %%eax\n"
17749 "12: movl 20(%4), %%edx\n"
17750 - "13: movl %%eax, 16(%3)\n"
17751 - "14: movl %%edx, 20(%3)\n"
17752 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17753 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17754 "15: movl 24(%4), %%eax\n"
17755 "16: movl 28(%4), %%edx\n"
17756 - "17: movl %%eax, 24(%3)\n"
17757 - "18: movl %%edx, 28(%3)\n"
17758 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17759 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17760 "19: movl 32(%4), %%eax\n"
17761 "20: movl 36(%4), %%edx\n"
17762 - "21: movl %%eax, 32(%3)\n"
17763 - "22: movl %%edx, 36(%3)\n"
17764 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17765 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17766 "23: movl 40(%4), %%eax\n"
17767 "24: movl 44(%4), %%edx\n"
17768 - "25: movl %%eax, 40(%3)\n"
17769 - "26: movl %%edx, 44(%3)\n"
17770 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17771 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17772 "27: movl 48(%4), %%eax\n"
17773 "28: movl 52(%4), %%edx\n"
17774 - "29: movl %%eax, 48(%3)\n"
17775 - "30: movl %%edx, 52(%3)\n"
17776 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17777 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17778 "31: movl 56(%4), %%eax\n"
17779 "32: movl 60(%4), %%edx\n"
17780 - "33: movl %%eax, 56(%3)\n"
17781 - "34: movl %%edx, 60(%3)\n"
17782 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17783 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17784 " addl $-64, %0\n"
17785 " addl $64, %4\n"
17786 " addl $64, %3\n"
17787 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17788 " shrl $2, %0\n"
17789 " andl $3, %%eax\n"
17790 " cld\n"
17791 + __COPYUSER_SET_ES
17792 "99: rep; movsl\n"
17793 "36: movl %%eax, %0\n"
17794 "37: rep; movsb\n"
17795 "100:\n"
17796 + __COPYUSER_RESTORE_ES
17797 + ".section .fixup,\"ax\"\n"
17798 + "101: lea 0(%%eax,%0,4),%0\n"
17799 + " jmp 100b\n"
17800 + ".previous\n"
17801 + ".section __ex_table,\"a\"\n"
17802 + " .align 4\n"
17803 + " .long 1b,100b\n"
17804 + " .long 2b,100b\n"
17805 + " .long 3b,100b\n"
17806 + " .long 4b,100b\n"
17807 + " .long 5b,100b\n"
17808 + " .long 6b,100b\n"
17809 + " .long 7b,100b\n"
17810 + " .long 8b,100b\n"
17811 + " .long 9b,100b\n"
17812 + " .long 10b,100b\n"
17813 + " .long 11b,100b\n"
17814 + " .long 12b,100b\n"
17815 + " .long 13b,100b\n"
17816 + " .long 14b,100b\n"
17817 + " .long 15b,100b\n"
17818 + " .long 16b,100b\n"
17819 + " .long 17b,100b\n"
17820 + " .long 18b,100b\n"
17821 + " .long 19b,100b\n"
17822 + " .long 20b,100b\n"
17823 + " .long 21b,100b\n"
17824 + " .long 22b,100b\n"
17825 + " .long 23b,100b\n"
17826 + " .long 24b,100b\n"
17827 + " .long 25b,100b\n"
17828 + " .long 26b,100b\n"
17829 + " .long 27b,100b\n"
17830 + " .long 28b,100b\n"
17831 + " .long 29b,100b\n"
17832 + " .long 30b,100b\n"
17833 + " .long 31b,100b\n"
17834 + " .long 32b,100b\n"
17835 + " .long 33b,100b\n"
17836 + " .long 34b,100b\n"
17837 + " .long 35b,100b\n"
17838 + " .long 36b,100b\n"
17839 + " .long 37b,100b\n"
17840 + " .long 99b,101b\n"
17841 + ".previous"
17842 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17843 + : "1"(to), "2"(from), "0"(size)
17844 + : "eax", "edx", "memory");
17845 + return size;
17846 +}
17847 +
17848 +static unsigned long
17849 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17850 +{
17851 + int d0, d1;
17852 + __asm__ __volatile__(
17853 + " .align 2,0x90\n"
17854 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17855 + " cmpl $67, %0\n"
17856 + " jbe 3f\n"
17857 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17858 + " .align 2,0x90\n"
17859 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17860 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17861 + "5: movl %%eax, 0(%3)\n"
17862 + "6: movl %%edx, 4(%3)\n"
17863 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17864 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17865 + "9: movl %%eax, 8(%3)\n"
17866 + "10: movl %%edx, 12(%3)\n"
17867 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17868 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17869 + "13: movl %%eax, 16(%3)\n"
17870 + "14: movl %%edx, 20(%3)\n"
17871 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17872 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17873 + "17: movl %%eax, 24(%3)\n"
17874 + "18: movl %%edx, 28(%3)\n"
17875 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17876 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17877 + "21: movl %%eax, 32(%3)\n"
17878 + "22: movl %%edx, 36(%3)\n"
17879 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17880 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17881 + "25: movl %%eax, 40(%3)\n"
17882 + "26: movl %%edx, 44(%3)\n"
17883 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17884 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17885 + "29: movl %%eax, 48(%3)\n"
17886 + "30: movl %%edx, 52(%3)\n"
17887 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17888 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17889 + "33: movl %%eax, 56(%3)\n"
17890 + "34: movl %%edx, 60(%3)\n"
17891 + " addl $-64, %0\n"
17892 + " addl $64, %4\n"
17893 + " addl $64, %3\n"
17894 + " cmpl $63, %0\n"
17895 + " ja 1b\n"
17896 + "35: movl %0, %%eax\n"
17897 + " shrl $2, %0\n"
17898 + " andl $3, %%eax\n"
17899 + " cld\n"
17900 + "99: rep; "__copyuser_seg" movsl\n"
17901 + "36: movl %%eax, %0\n"
17902 + "37: rep; "__copyuser_seg" movsb\n"
17903 + "100:\n"
17904 ".section .fixup,\"ax\"\n"
17905 "101: lea 0(%%eax,%0,4),%0\n"
17906 " jmp 100b\n"
17907 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17908 int d0, d1;
17909 __asm__ __volatile__(
17910 " .align 2,0x90\n"
17911 - "0: movl 32(%4), %%eax\n"
17912 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17913 " cmpl $67, %0\n"
17914 " jbe 2f\n"
17915 - "1: movl 64(%4), %%eax\n"
17916 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17917 " .align 2,0x90\n"
17918 - "2: movl 0(%4), %%eax\n"
17919 - "21: movl 4(%4), %%edx\n"
17920 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17921 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17922 " movl %%eax, 0(%3)\n"
17923 " movl %%edx, 4(%3)\n"
17924 - "3: movl 8(%4), %%eax\n"
17925 - "31: movl 12(%4),%%edx\n"
17926 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17927 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17928 " movl %%eax, 8(%3)\n"
17929 " movl %%edx, 12(%3)\n"
17930 - "4: movl 16(%4), %%eax\n"
17931 - "41: movl 20(%4), %%edx\n"
17932 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17933 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17934 " movl %%eax, 16(%3)\n"
17935 " movl %%edx, 20(%3)\n"
17936 - "10: movl 24(%4), %%eax\n"
17937 - "51: movl 28(%4), %%edx\n"
17938 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17939 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17940 " movl %%eax, 24(%3)\n"
17941 " movl %%edx, 28(%3)\n"
17942 - "11: movl 32(%4), %%eax\n"
17943 - "61: movl 36(%4), %%edx\n"
17944 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17945 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17946 " movl %%eax, 32(%3)\n"
17947 " movl %%edx, 36(%3)\n"
17948 - "12: movl 40(%4), %%eax\n"
17949 - "71: movl 44(%4), %%edx\n"
17950 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17951 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17952 " movl %%eax, 40(%3)\n"
17953 " movl %%edx, 44(%3)\n"
17954 - "13: movl 48(%4), %%eax\n"
17955 - "81: movl 52(%4), %%edx\n"
17956 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17957 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17958 " movl %%eax, 48(%3)\n"
17959 " movl %%edx, 52(%3)\n"
17960 - "14: movl 56(%4), %%eax\n"
17961 - "91: movl 60(%4), %%edx\n"
17962 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17963 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17964 " movl %%eax, 56(%3)\n"
17965 " movl %%edx, 60(%3)\n"
17966 " addl $-64, %0\n"
17967 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17968 " shrl $2, %0\n"
17969 " andl $3, %%eax\n"
17970 " cld\n"
17971 - "6: rep; movsl\n"
17972 + "6: rep; "__copyuser_seg" movsl\n"
17973 " movl %%eax,%0\n"
17974 - "7: rep; movsb\n"
17975 + "7: rep; "__copyuser_seg" movsb\n"
17976 "8:\n"
17977 ".section .fixup,\"ax\"\n"
17978 "9: lea 0(%%eax,%0,4),%0\n"
17979 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17980
17981 __asm__ __volatile__(
17982 " .align 2,0x90\n"
17983 - "0: movl 32(%4), %%eax\n"
17984 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17985 " cmpl $67, %0\n"
17986 " jbe 2f\n"
17987 - "1: movl 64(%4), %%eax\n"
17988 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17989 " .align 2,0x90\n"
17990 - "2: movl 0(%4), %%eax\n"
17991 - "21: movl 4(%4), %%edx\n"
17992 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17993 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17994 " movnti %%eax, 0(%3)\n"
17995 " movnti %%edx, 4(%3)\n"
17996 - "3: movl 8(%4), %%eax\n"
17997 - "31: movl 12(%4),%%edx\n"
17998 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17999 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18000 " movnti %%eax, 8(%3)\n"
18001 " movnti %%edx, 12(%3)\n"
18002 - "4: movl 16(%4), %%eax\n"
18003 - "41: movl 20(%4), %%edx\n"
18004 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18005 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18006 " movnti %%eax, 16(%3)\n"
18007 " movnti %%edx, 20(%3)\n"
18008 - "10: movl 24(%4), %%eax\n"
18009 - "51: movl 28(%4), %%edx\n"
18010 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18011 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18012 " movnti %%eax, 24(%3)\n"
18013 " movnti %%edx, 28(%3)\n"
18014 - "11: movl 32(%4), %%eax\n"
18015 - "61: movl 36(%4), %%edx\n"
18016 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18017 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18018 " movnti %%eax, 32(%3)\n"
18019 " movnti %%edx, 36(%3)\n"
18020 - "12: movl 40(%4), %%eax\n"
18021 - "71: movl 44(%4), %%edx\n"
18022 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18023 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18024 " movnti %%eax, 40(%3)\n"
18025 " movnti %%edx, 44(%3)\n"
18026 - "13: movl 48(%4), %%eax\n"
18027 - "81: movl 52(%4), %%edx\n"
18028 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18029 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18030 " movnti %%eax, 48(%3)\n"
18031 " movnti %%edx, 52(%3)\n"
18032 - "14: movl 56(%4), %%eax\n"
18033 - "91: movl 60(%4), %%edx\n"
18034 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18035 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18036 " movnti %%eax, 56(%3)\n"
18037 " movnti %%edx, 60(%3)\n"
18038 " addl $-64, %0\n"
18039 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18040 " shrl $2, %0\n"
18041 " andl $3, %%eax\n"
18042 " cld\n"
18043 - "6: rep; movsl\n"
18044 + "6: rep; "__copyuser_seg" movsl\n"
18045 " movl %%eax,%0\n"
18046 - "7: rep; movsb\n"
18047 + "7: rep; "__copyuser_seg" movsb\n"
18048 "8:\n"
18049 ".section .fixup,\"ax\"\n"
18050 "9: lea 0(%%eax,%0,4),%0\n"
18051 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18052
18053 __asm__ __volatile__(
18054 " .align 2,0x90\n"
18055 - "0: movl 32(%4), %%eax\n"
18056 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18057 " cmpl $67, %0\n"
18058 " jbe 2f\n"
18059 - "1: movl 64(%4), %%eax\n"
18060 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18061 " .align 2,0x90\n"
18062 - "2: movl 0(%4), %%eax\n"
18063 - "21: movl 4(%4), %%edx\n"
18064 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18065 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18066 " movnti %%eax, 0(%3)\n"
18067 " movnti %%edx, 4(%3)\n"
18068 - "3: movl 8(%4), %%eax\n"
18069 - "31: movl 12(%4),%%edx\n"
18070 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18071 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18072 " movnti %%eax, 8(%3)\n"
18073 " movnti %%edx, 12(%3)\n"
18074 - "4: movl 16(%4), %%eax\n"
18075 - "41: movl 20(%4), %%edx\n"
18076 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18077 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18078 " movnti %%eax, 16(%3)\n"
18079 " movnti %%edx, 20(%3)\n"
18080 - "10: movl 24(%4), %%eax\n"
18081 - "51: movl 28(%4), %%edx\n"
18082 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18083 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18084 " movnti %%eax, 24(%3)\n"
18085 " movnti %%edx, 28(%3)\n"
18086 - "11: movl 32(%4), %%eax\n"
18087 - "61: movl 36(%4), %%edx\n"
18088 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18089 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18090 " movnti %%eax, 32(%3)\n"
18091 " movnti %%edx, 36(%3)\n"
18092 - "12: movl 40(%4), %%eax\n"
18093 - "71: movl 44(%4), %%edx\n"
18094 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18095 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18096 " movnti %%eax, 40(%3)\n"
18097 " movnti %%edx, 44(%3)\n"
18098 - "13: movl 48(%4), %%eax\n"
18099 - "81: movl 52(%4), %%edx\n"
18100 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18101 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18102 " movnti %%eax, 48(%3)\n"
18103 " movnti %%edx, 52(%3)\n"
18104 - "14: movl 56(%4), %%eax\n"
18105 - "91: movl 60(%4), %%edx\n"
18106 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18107 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18108 " movnti %%eax, 56(%3)\n"
18109 " movnti %%edx, 60(%3)\n"
18110 " addl $-64, %0\n"
18111 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18112 " shrl $2, %0\n"
18113 " andl $3, %%eax\n"
18114 " cld\n"
18115 - "6: rep; movsl\n"
18116 + "6: rep; "__copyuser_seg" movsl\n"
18117 " movl %%eax,%0\n"
18118 - "7: rep; movsb\n"
18119 + "7: rep; "__copyuser_seg" movsb\n"
18120 "8:\n"
18121 ".section .fixup,\"ax\"\n"
18122 "9: lea 0(%%eax,%0,4),%0\n"
18123 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18124 */
18125 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18126 unsigned long size);
18127 -unsigned long __copy_user_intel(void __user *to, const void *from,
18128 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18129 + unsigned long size);
18130 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18131 unsigned long size);
18132 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18133 const void __user *from, unsigned long size);
18134 #endif /* CONFIG_X86_INTEL_USERCOPY */
18135
18136 /* Generic arbitrary sized copy. */
18137 -#define __copy_user(to, from, size) \
18138 +#define __copy_user(to, from, size, prefix, set, restore) \
18139 do { \
18140 int __d0, __d1, __d2; \
18141 __asm__ __volatile__( \
18142 + set \
18143 " cmp $7,%0\n" \
18144 " jbe 1f\n" \
18145 " movl %1,%0\n" \
18146 " negl %0\n" \
18147 " andl $7,%0\n" \
18148 " subl %0,%3\n" \
18149 - "4: rep; movsb\n" \
18150 + "4: rep; "prefix"movsb\n" \
18151 " movl %3,%0\n" \
18152 " shrl $2,%0\n" \
18153 " andl $3,%3\n" \
18154 " .align 2,0x90\n" \
18155 - "0: rep; movsl\n" \
18156 + "0: rep; "prefix"movsl\n" \
18157 " movl %3,%0\n" \
18158 - "1: rep; movsb\n" \
18159 + "1: rep; "prefix"movsb\n" \
18160 "2:\n" \
18161 + restore \
18162 ".section .fixup,\"ax\"\n" \
18163 "5: addl %3,%0\n" \
18164 " jmp 2b\n" \
18165 @@ -682,14 +799,14 @@ do { \
18166 " negl %0\n" \
18167 " andl $7,%0\n" \
18168 " subl %0,%3\n" \
18169 - "4: rep; movsb\n" \
18170 + "4: rep; "__copyuser_seg"movsb\n" \
18171 " movl %3,%0\n" \
18172 " shrl $2,%0\n" \
18173 " andl $3,%3\n" \
18174 " .align 2,0x90\n" \
18175 - "0: rep; movsl\n" \
18176 + "0: rep; "__copyuser_seg"movsl\n" \
18177 " movl %3,%0\n" \
18178 - "1: rep; movsb\n" \
18179 + "1: rep; "__copyuser_seg"movsb\n" \
18180 "2:\n" \
18181 ".section .fixup,\"ax\"\n" \
18182 "5: addl %3,%0\n" \
18183 @@ -775,9 +892,9 @@ survive:
18184 }
18185 #endif
18186 if (movsl_is_ok(to, from, n))
18187 - __copy_user(to, from, n);
18188 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18189 else
18190 - n = __copy_user_intel(to, from, n);
18191 + n = __generic_copy_to_user_intel(to, from, n);
18192 return n;
18193 }
18194 EXPORT_SYMBOL(__copy_to_user_ll);
18195 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18196 unsigned long n)
18197 {
18198 if (movsl_is_ok(to, from, n))
18199 - __copy_user(to, from, n);
18200 + __copy_user(to, from, n, __copyuser_seg, "", "");
18201 else
18202 - n = __copy_user_intel((void __user *)to,
18203 - (const void *)from, n);
18204 + n = __generic_copy_from_user_intel(to, from, n);
18205 return n;
18206 }
18207 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18208 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18209 if (n > 64 && cpu_has_xmm2)
18210 n = __copy_user_intel_nocache(to, from, n);
18211 else
18212 - __copy_user(to, from, n);
18213 + __copy_user(to, from, n, __copyuser_seg, "", "");
18214 #else
18215 - __copy_user(to, from, n);
18216 + __copy_user(to, from, n, __copyuser_seg, "", "");
18217 #endif
18218 return n;
18219 }
18220 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18221
18222 -/**
18223 - * copy_to_user: - Copy a block of data into user space.
18224 - * @to: Destination address, in user space.
18225 - * @from: Source address, in kernel space.
18226 - * @n: Number of bytes to copy.
18227 - *
18228 - * Context: User context only. This function may sleep.
18229 - *
18230 - * Copy data from kernel space to user space.
18231 - *
18232 - * Returns number of bytes that could not be copied.
18233 - * On success, this will be zero.
18234 - */
18235 -unsigned long
18236 -copy_to_user(void __user *to, const void *from, unsigned long n)
18237 +void copy_from_user_overflow(void)
18238 {
18239 - if (access_ok(VERIFY_WRITE, to, n))
18240 - n = __copy_to_user(to, from, n);
18241 - return n;
18242 + WARN(1, "Buffer overflow detected!\n");
18243 }
18244 -EXPORT_SYMBOL(copy_to_user);
18245 +EXPORT_SYMBOL(copy_from_user_overflow);
18246
18247 -/**
18248 - * copy_from_user: - Copy a block of data from user space.
18249 - * @to: Destination address, in kernel space.
18250 - * @from: Source address, in user space.
18251 - * @n: Number of bytes to copy.
18252 - *
18253 - * Context: User context only. This function may sleep.
18254 - *
18255 - * Copy data from user space to kernel space.
18256 - *
18257 - * Returns number of bytes that could not be copied.
18258 - * On success, this will be zero.
18259 - *
18260 - * If some data could not be copied, this function will pad the copied
18261 - * data to the requested size using zero bytes.
18262 - */
18263 -unsigned long
18264 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18265 +void copy_to_user_overflow(void)
18266 {
18267 - if (access_ok(VERIFY_READ, from, n))
18268 - n = __copy_from_user(to, from, n);
18269 - else
18270 - memset(to, 0, n);
18271 - return n;
18272 + WARN(1, "Buffer overflow detected!\n");
18273 }
18274 -EXPORT_SYMBOL(_copy_from_user);
18275 +EXPORT_SYMBOL(copy_to_user_overflow);
18276
18277 -void copy_from_user_overflow(void)
18278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18279 +void __set_fs(mm_segment_t x)
18280 {
18281 - WARN(1, "Buffer overflow detected!\n");
18282 + switch (x.seg) {
18283 + case 0:
18284 + loadsegment(gs, 0);
18285 + break;
18286 + case TASK_SIZE_MAX:
18287 + loadsegment(gs, __USER_DS);
18288 + break;
18289 + case -1UL:
18290 + loadsegment(gs, __KERNEL_DS);
18291 + break;
18292 + default:
18293 + BUG();
18294 + }
18295 + return;
18296 }
18297 -EXPORT_SYMBOL(copy_from_user_overflow);
18298 +EXPORT_SYMBOL(__set_fs);
18299 +
18300 +void set_fs(mm_segment_t x)
18301 +{
18302 + current_thread_info()->addr_limit = x;
18303 + __set_fs(x);
18304 +}
18305 +EXPORT_SYMBOL(set_fs);
18306 +#endif
18307 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_64.c linux-3.0.3/arch/x86/lib/usercopy_64.c
18308 --- linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18309 +++ linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18310 @@ -42,6 +42,12 @@ long
18311 __strncpy_from_user(char *dst, const char __user *src, long count)
18312 {
18313 long res;
18314 +
18315 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18316 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18317 + src += PAX_USER_SHADOW_BASE;
18318 +#endif
18319 +
18320 __do_strncpy_from_user(dst, src, count, res);
18321 return res;
18322 }
18323 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18324 {
18325 long __d0;
18326 might_fault();
18327 +
18328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18329 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18330 + addr += PAX_USER_SHADOW_BASE;
18331 +#endif
18332 +
18333 /* no memory constraint because it doesn't change any memory gcc knows
18334 about */
18335 asm volatile(
18336 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18337
18338 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18339 {
18340 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18341 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18342 +
18343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18344 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18345 + to += PAX_USER_SHADOW_BASE;
18346 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18347 + from += PAX_USER_SHADOW_BASE;
18348 +#endif
18349 +
18350 return copy_user_generic((__force void *)to, (__force void *)from, len);
18351 - }
18352 - return len;
18353 + }
18354 + return len;
18355 }
18356 EXPORT_SYMBOL(copy_in_user);
18357
18358 diff -urNp linux-3.0.3/arch/x86/Makefile linux-3.0.3/arch/x86/Makefile
18359 --- linux-3.0.3/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18360 +++ linux-3.0.3/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18361 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18362 else
18363 BITS := 64
18364 UTS_MACHINE := x86_64
18365 + biarch := $(call cc-option,-m64)
18366 CHECKFLAGS += -D__x86_64__ -m64
18367
18368 KBUILD_AFLAGS += -m64
18369 @@ -195,3 +196,12 @@ define archhelp
18370 echo ' FDARGS="..." arguments for the booted kernel'
18371 echo ' FDINITRD=file initrd for the booted kernel'
18372 endef
18373 +
18374 +define OLD_LD
18375 +
18376 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18377 +*** Please upgrade your binutils to 2.18 or newer
18378 +endef
18379 +
18380 +archprepare:
18381 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18382 diff -urNp linux-3.0.3/arch/x86/mm/extable.c linux-3.0.3/arch/x86/mm/extable.c
18383 --- linux-3.0.3/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18384 +++ linux-3.0.3/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18385 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18386 const struct exception_table_entry *fixup;
18387
18388 #ifdef CONFIG_PNPBIOS
18389 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18390 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18391 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18392 extern u32 pnp_bios_is_utter_crap;
18393 pnp_bios_is_utter_crap = 1;
18394 diff -urNp linux-3.0.3/arch/x86/mm/fault.c linux-3.0.3/arch/x86/mm/fault.c
18395 --- linux-3.0.3/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18396 +++ linux-3.0.3/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18397 @@ -13,10 +13,18 @@
18398 #include <linux/perf_event.h> /* perf_sw_event */
18399 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18400 #include <linux/prefetch.h> /* prefetchw */
18401 +#include <linux/unistd.h>
18402 +#include <linux/compiler.h>
18403
18404 #include <asm/traps.h> /* dotraplinkage, ... */
18405 #include <asm/pgalloc.h> /* pgd_*(), ... */
18406 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18407 +#include <asm/vsyscall.h>
18408 +#include <asm/tlbflush.h>
18409 +
18410 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18411 +#include <asm/stacktrace.h>
18412 +#endif
18413
18414 /*
18415 * Page fault error code bits:
18416 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18417 int ret = 0;
18418
18419 /* kprobe_running() needs smp_processor_id() */
18420 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18421 + if (kprobes_built_in() && !user_mode(regs)) {
18422 preempt_disable();
18423 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18424 ret = 1;
18425 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18426 return !instr_lo || (instr_lo>>1) == 1;
18427 case 0x00:
18428 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18429 - if (probe_kernel_address(instr, opcode))
18430 + if (user_mode(regs)) {
18431 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18432 + return 0;
18433 + } else if (probe_kernel_address(instr, opcode))
18434 return 0;
18435
18436 *prefetch = (instr_lo == 0xF) &&
18437 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18438 while (instr < max_instr) {
18439 unsigned char opcode;
18440
18441 - if (probe_kernel_address(instr, opcode))
18442 + if (user_mode(regs)) {
18443 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18444 + break;
18445 + } else if (probe_kernel_address(instr, opcode))
18446 break;
18447
18448 instr++;
18449 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18450 force_sig_info(si_signo, &info, tsk);
18451 }
18452
18453 +#ifdef CONFIG_PAX_EMUTRAMP
18454 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18455 +#endif
18456 +
18457 +#ifdef CONFIG_PAX_PAGEEXEC
18458 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18459 +{
18460 + pgd_t *pgd;
18461 + pud_t *pud;
18462 + pmd_t *pmd;
18463 +
18464 + pgd = pgd_offset(mm, address);
18465 + if (!pgd_present(*pgd))
18466 + return NULL;
18467 + pud = pud_offset(pgd, address);
18468 + if (!pud_present(*pud))
18469 + return NULL;
18470 + pmd = pmd_offset(pud, address);
18471 + if (!pmd_present(*pmd))
18472 + return NULL;
18473 + return pmd;
18474 +}
18475 +#endif
18476 +
18477 DEFINE_SPINLOCK(pgd_lock);
18478 LIST_HEAD(pgd_list);
18479
18480 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18481 for (address = VMALLOC_START & PMD_MASK;
18482 address >= TASK_SIZE && address < FIXADDR_TOP;
18483 address += PMD_SIZE) {
18484 +
18485 +#ifdef CONFIG_PAX_PER_CPU_PGD
18486 + unsigned long cpu;
18487 +#else
18488 struct page *page;
18489 +#endif
18490
18491 spin_lock(&pgd_lock);
18492 +
18493 +#ifdef CONFIG_PAX_PER_CPU_PGD
18494 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18495 + pgd_t *pgd = get_cpu_pgd(cpu);
18496 + pmd_t *ret;
18497 +#else
18498 list_for_each_entry(page, &pgd_list, lru) {
18499 + pgd_t *pgd = page_address(page);
18500 spinlock_t *pgt_lock;
18501 pmd_t *ret;
18502
18503 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18504 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18505
18506 spin_lock(pgt_lock);
18507 - ret = vmalloc_sync_one(page_address(page), address);
18508 +#endif
18509 +
18510 + ret = vmalloc_sync_one(pgd, address);
18511 +
18512 +#ifndef CONFIG_PAX_PER_CPU_PGD
18513 spin_unlock(pgt_lock);
18514 +#endif
18515
18516 if (!ret)
18517 break;
18518 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18519 * an interrupt in the middle of a task switch..
18520 */
18521 pgd_paddr = read_cr3();
18522 +
18523 +#ifdef CONFIG_PAX_PER_CPU_PGD
18524 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18525 +#endif
18526 +
18527 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18528 if (!pmd_k)
18529 return -1;
18530 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18531 * happen within a race in page table update. In the later
18532 * case just flush:
18533 */
18534 +
18535 +#ifdef CONFIG_PAX_PER_CPU_PGD
18536 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18537 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18538 +#else
18539 pgd = pgd_offset(current->active_mm, address);
18540 +#endif
18541 +
18542 pgd_ref = pgd_offset_k(address);
18543 if (pgd_none(*pgd_ref))
18544 return -1;
18545 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18546 static int is_errata100(struct pt_regs *regs, unsigned long address)
18547 {
18548 #ifdef CONFIG_X86_64
18549 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18550 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18551 return 1;
18552 #endif
18553 return 0;
18554 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18555 }
18556
18557 static const char nx_warning[] = KERN_CRIT
18558 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18559 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18560
18561 static void
18562 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18563 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18564 if (!oops_may_print())
18565 return;
18566
18567 - if (error_code & PF_INSTR) {
18568 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18569 unsigned int level;
18570
18571 pte_t *pte = lookup_address(address, &level);
18572
18573 if (pte && pte_present(*pte) && !pte_exec(*pte))
18574 - printk(nx_warning, current_uid());
18575 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18576 + }
18577 +
18578 +#ifdef CONFIG_PAX_KERNEXEC
18579 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18580 + if (current->signal->curr_ip)
18581 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18582 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18583 + else
18584 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18585 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18586 }
18587 +#endif
18588
18589 printk(KERN_ALERT "BUG: unable to handle kernel ");
18590 if (address < PAGE_SIZE)
18591 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18592 unsigned long address, int si_code)
18593 {
18594 struct task_struct *tsk = current;
18595 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18596 + struct mm_struct *mm = tsk->mm;
18597 +#endif
18598 +
18599 +#ifdef CONFIG_X86_64
18600 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18601 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18602 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18603 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18604 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18605 + return;
18606 + }
18607 + }
18608 +#endif
18609 +
18610 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18611 + if (mm && (error_code & PF_USER)) {
18612 + unsigned long ip = regs->ip;
18613 +
18614 + if (v8086_mode(regs))
18615 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18616 +
18617 + /*
18618 + * It's possible to have interrupts off here:
18619 + */
18620 + local_irq_enable();
18621 +
18622 +#ifdef CONFIG_PAX_PAGEEXEC
18623 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18624 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18625 +
18626 +#ifdef CONFIG_PAX_EMUTRAMP
18627 + switch (pax_handle_fetch_fault(regs)) {
18628 + case 2:
18629 + return;
18630 + }
18631 +#endif
18632 +
18633 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18634 + do_group_exit(SIGKILL);
18635 + }
18636 +#endif
18637 +
18638 +#ifdef CONFIG_PAX_SEGMEXEC
18639 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18640 +
18641 +#ifdef CONFIG_PAX_EMUTRAMP
18642 + switch (pax_handle_fetch_fault(regs)) {
18643 + case 2:
18644 + return;
18645 + }
18646 +#endif
18647 +
18648 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18649 + do_group_exit(SIGKILL);
18650 + }
18651 +#endif
18652 +
18653 + }
18654 +#endif
18655
18656 /* User mode accesses just cause a SIGSEGV */
18657 if (error_code & PF_USER) {
18658 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18659 return 1;
18660 }
18661
18662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18663 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18664 +{
18665 + pte_t *pte;
18666 + pmd_t *pmd;
18667 + spinlock_t *ptl;
18668 + unsigned char pte_mask;
18669 +
18670 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18671 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18672 + return 0;
18673 +
18674 + /* PaX: it's our fault, let's handle it if we can */
18675 +
18676 + /* PaX: take a look at read faults before acquiring any locks */
18677 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18678 + /* instruction fetch attempt from a protected page in user mode */
18679 + up_read(&mm->mmap_sem);
18680 +
18681 +#ifdef CONFIG_PAX_EMUTRAMP
18682 + switch (pax_handle_fetch_fault(regs)) {
18683 + case 2:
18684 + return 1;
18685 + }
18686 +#endif
18687 +
18688 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18689 + do_group_exit(SIGKILL);
18690 + }
18691 +
18692 + pmd = pax_get_pmd(mm, address);
18693 + if (unlikely(!pmd))
18694 + return 0;
18695 +
18696 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18697 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18698 + pte_unmap_unlock(pte, ptl);
18699 + return 0;
18700 + }
18701 +
18702 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18703 + /* write attempt to a protected page in user mode */
18704 + pte_unmap_unlock(pte, ptl);
18705 + return 0;
18706 + }
18707 +
18708 +#ifdef CONFIG_SMP
18709 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18710 +#else
18711 + if (likely(address > get_limit(regs->cs)))
18712 +#endif
18713 + {
18714 + set_pte(pte, pte_mkread(*pte));
18715 + __flush_tlb_one(address);
18716 + pte_unmap_unlock(pte, ptl);
18717 + up_read(&mm->mmap_sem);
18718 + return 1;
18719 + }
18720 +
18721 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18722 +
18723 + /*
18724 + * PaX: fill DTLB with user rights and retry
18725 + */
18726 + __asm__ __volatile__ (
18727 + "orb %2,(%1)\n"
18728 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18729 +/*
18730 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18731 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18732 + * page fault when examined during a TLB load attempt. this is true not only
18733 + * for PTEs holding a non-present entry but also present entries that will
18734 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18735 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18736 + * for our target pages since their PTEs are simply not in the TLBs at all.
18737 +
18738 + * the best thing in omitting it is that we gain around 15-20% speed in the
18739 + * fast path of the page fault handler and can get rid of tracing since we
18740 + * can no longer flush unintended entries.
18741 + */
18742 + "invlpg (%0)\n"
18743 +#endif
18744 + __copyuser_seg"testb $0,(%0)\n"
18745 + "xorb %3,(%1)\n"
18746 + :
18747 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18748 + : "memory", "cc");
18749 + pte_unmap_unlock(pte, ptl);
18750 + up_read(&mm->mmap_sem);
18751 + return 1;
18752 +}
18753 +#endif
18754 +
18755 /*
18756 * Handle a spurious fault caused by a stale TLB entry.
18757 *
18758 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18759 static inline int
18760 access_error(unsigned long error_code, struct vm_area_struct *vma)
18761 {
18762 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18763 + return 1;
18764 +
18765 if (error_code & PF_WRITE) {
18766 /* write, present and write, not present: */
18767 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18768 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18769 {
18770 struct vm_area_struct *vma;
18771 struct task_struct *tsk;
18772 - unsigned long address;
18773 struct mm_struct *mm;
18774 int fault;
18775 int write = error_code & PF_WRITE;
18776 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18777 (write ? FAULT_FLAG_WRITE : 0);
18778
18779 + /* Get the faulting address: */
18780 + unsigned long address = read_cr2();
18781 +
18782 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18783 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18784 + if (!search_exception_tables(regs->ip)) {
18785 + bad_area_nosemaphore(regs, error_code, address);
18786 + return;
18787 + }
18788 + if (address < PAX_USER_SHADOW_BASE) {
18789 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18790 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18791 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18792 + } else
18793 + address -= PAX_USER_SHADOW_BASE;
18794 + }
18795 +#endif
18796 +
18797 tsk = current;
18798 mm = tsk->mm;
18799
18800 - /* Get the faulting address: */
18801 - address = read_cr2();
18802 -
18803 /*
18804 * Detect and handle instructions that would cause a page fault for
18805 * both a tracked kernel page and a userspace page.
18806 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18807 * User-mode registers count as a user access even for any
18808 * potential system fault or CPU buglet:
18809 */
18810 - if (user_mode_vm(regs)) {
18811 + if (user_mode(regs)) {
18812 local_irq_enable();
18813 error_code |= PF_USER;
18814 } else {
18815 @@ -1103,6 +1351,11 @@ retry:
18816 might_sleep();
18817 }
18818
18819 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18820 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18821 + return;
18822 +#endif
18823 +
18824 vma = find_vma(mm, address);
18825 if (unlikely(!vma)) {
18826 bad_area(regs, error_code, address);
18827 @@ -1114,18 +1367,24 @@ retry:
18828 bad_area(regs, error_code, address);
18829 return;
18830 }
18831 - if (error_code & PF_USER) {
18832 - /*
18833 - * Accessing the stack below %sp is always a bug.
18834 - * The large cushion allows instructions like enter
18835 - * and pusha to work. ("enter $65535, $31" pushes
18836 - * 32 pointers and then decrements %sp by 65535.)
18837 - */
18838 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18839 - bad_area(regs, error_code, address);
18840 - return;
18841 - }
18842 + /*
18843 + * Accessing the stack below %sp is always a bug.
18844 + * The large cushion allows instructions like enter
18845 + * and pusha to work. ("enter $65535, $31" pushes
18846 + * 32 pointers and then decrements %sp by 65535.)
18847 + */
18848 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18849 + bad_area(regs, error_code, address);
18850 + return;
18851 }
18852 +
18853 +#ifdef CONFIG_PAX_SEGMEXEC
18854 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18855 + bad_area(regs, error_code, address);
18856 + return;
18857 + }
18858 +#endif
18859 +
18860 if (unlikely(expand_stack(vma, address))) {
18861 bad_area(regs, error_code, address);
18862 return;
18863 @@ -1180,3 +1439,199 @@ good_area:
18864
18865 up_read(&mm->mmap_sem);
18866 }
18867 +
18868 +#ifdef CONFIG_PAX_EMUTRAMP
18869 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18870 +{
18871 + int err;
18872 +
18873 + do { /* PaX: gcc trampoline emulation #1 */
18874 + unsigned char mov1, mov2;
18875 + unsigned short jmp;
18876 + unsigned int addr1, addr2;
18877 +
18878 +#ifdef CONFIG_X86_64
18879 + if ((regs->ip + 11) >> 32)
18880 + break;
18881 +#endif
18882 +
18883 + err = get_user(mov1, (unsigned char __user *)regs->ip);
18884 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18885 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18886 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18887 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18888 +
18889 + if (err)
18890 + break;
18891 +
18892 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18893 + regs->cx = addr1;
18894 + regs->ax = addr2;
18895 + regs->ip = addr2;
18896 + return 2;
18897 + }
18898 + } while (0);
18899 +
18900 + do { /* PaX: gcc trampoline emulation #2 */
18901 + unsigned char mov, jmp;
18902 + unsigned int addr1, addr2;
18903 +
18904 +#ifdef CONFIG_X86_64
18905 + if ((regs->ip + 9) >> 32)
18906 + break;
18907 +#endif
18908 +
18909 + err = get_user(mov, (unsigned char __user *)regs->ip);
18910 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18911 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18912 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18913 +
18914 + if (err)
18915 + break;
18916 +
18917 + if (mov == 0xB9 && jmp == 0xE9) {
18918 + regs->cx = addr1;
18919 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18920 + return 2;
18921 + }
18922 + } while (0);
18923 +
18924 + return 1; /* PaX in action */
18925 +}
18926 +
18927 +#ifdef CONFIG_X86_64
18928 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18929 +{
18930 + int err;
18931 +
18932 + do { /* PaX: gcc trampoline emulation #1 */
18933 + unsigned short mov1, mov2, jmp1;
18934 + unsigned char jmp2;
18935 + unsigned int addr1;
18936 + unsigned long addr2;
18937 +
18938 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18939 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18940 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18941 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18942 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18943 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18944 +
18945 + if (err)
18946 + break;
18947 +
18948 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18949 + regs->r11 = addr1;
18950 + regs->r10 = addr2;
18951 + regs->ip = addr1;
18952 + return 2;
18953 + }
18954 + } while (0);
18955 +
18956 + do { /* PaX: gcc trampoline emulation #2 */
18957 + unsigned short mov1, mov2, jmp1;
18958 + unsigned char jmp2;
18959 + unsigned long addr1, addr2;
18960 +
18961 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18962 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18963 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18964 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18965 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18966 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18967 +
18968 + if (err)
18969 + break;
18970 +
18971 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18972 + regs->r11 = addr1;
18973 + regs->r10 = addr2;
18974 + regs->ip = addr1;
18975 + return 2;
18976 + }
18977 + } while (0);
18978 +
18979 + return 1; /* PaX in action */
18980 +}
18981 +#endif
18982 +
18983 +/*
18984 + * PaX: decide what to do with offenders (regs->ip = fault address)
18985 + *
18986 + * returns 1 when task should be killed
18987 + * 2 when gcc trampoline was detected
18988 + */
18989 +static int pax_handle_fetch_fault(struct pt_regs *regs)
18990 +{
18991 + if (v8086_mode(regs))
18992 + return 1;
18993 +
18994 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
18995 + return 1;
18996 +
18997 +#ifdef CONFIG_X86_32
18998 + return pax_handle_fetch_fault_32(regs);
18999 +#else
19000 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19001 + return pax_handle_fetch_fault_32(regs);
19002 + else
19003 + return pax_handle_fetch_fault_64(regs);
19004 +#endif
19005 +}
19006 +#endif
19007 +
19008 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19009 +void pax_report_insns(void *pc, void *sp)
19010 +{
19011 + long i;
19012 +
19013 + printk(KERN_ERR "PAX: bytes at PC: ");
19014 + for (i = 0; i < 20; i++) {
19015 + unsigned char c;
19016 + if (get_user(c, (__force unsigned char __user *)pc+i))
19017 + printk(KERN_CONT "?? ");
19018 + else
19019 + printk(KERN_CONT "%02x ", c);
19020 + }
19021 + printk("\n");
19022 +
19023 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19024 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19025 + unsigned long c;
19026 + if (get_user(c, (__force unsigned long __user *)sp+i))
19027 +#ifdef CONFIG_X86_32
19028 + printk(KERN_CONT "???????? ");
19029 +#else
19030 + printk(KERN_CONT "???????????????? ");
19031 +#endif
19032 + else
19033 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19034 + }
19035 + printk("\n");
19036 +}
19037 +#endif
19038 +
19039 +/**
19040 + * probe_kernel_write(): safely attempt to write to a location
19041 + * @dst: address to write to
19042 + * @src: pointer to the data that shall be written
19043 + * @size: size of the data chunk
19044 + *
19045 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19046 + * happens, handle that and return -EFAULT.
19047 + */
19048 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19049 +{
19050 + long ret;
19051 + mm_segment_t old_fs = get_fs();
19052 +
19053 + set_fs(KERNEL_DS);
19054 + pagefault_disable();
19055 + pax_open_kernel();
19056 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19057 + pax_close_kernel();
19058 + pagefault_enable();
19059 + set_fs(old_fs);
19060 +
19061 + return ret ? -EFAULT : 0;
19062 +}
19063 diff -urNp linux-3.0.3/arch/x86/mm/gup.c linux-3.0.3/arch/x86/mm/gup.c
19064 --- linux-3.0.3/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19065 +++ linux-3.0.3/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19066 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19067 addr = start;
19068 len = (unsigned long) nr_pages << PAGE_SHIFT;
19069 end = start + len;
19070 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19071 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19072 (void __user *)start, len)))
19073 return 0;
19074
19075 diff -urNp linux-3.0.3/arch/x86/mm/highmem_32.c linux-3.0.3/arch/x86/mm/highmem_32.c
19076 --- linux-3.0.3/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19077 +++ linux-3.0.3/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19078 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19079 idx = type + KM_TYPE_NR*smp_processor_id();
19080 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19081 BUG_ON(!pte_none(*(kmap_pte-idx)));
19082 +
19083 + pax_open_kernel();
19084 set_pte(kmap_pte-idx, mk_pte(page, prot));
19085 + pax_close_kernel();
19086
19087 return (void *)vaddr;
19088 }
19089 diff -urNp linux-3.0.3/arch/x86/mm/hugetlbpage.c linux-3.0.3/arch/x86/mm/hugetlbpage.c
19090 --- linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19091 +++ linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19092 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19093 struct hstate *h = hstate_file(file);
19094 struct mm_struct *mm = current->mm;
19095 struct vm_area_struct *vma;
19096 - unsigned long start_addr;
19097 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19098 +
19099 +#ifdef CONFIG_PAX_SEGMEXEC
19100 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19101 + pax_task_size = SEGMEXEC_TASK_SIZE;
19102 +#endif
19103 +
19104 + pax_task_size -= PAGE_SIZE;
19105
19106 if (len > mm->cached_hole_size) {
19107 - start_addr = mm->free_area_cache;
19108 + start_addr = mm->free_area_cache;
19109 } else {
19110 - start_addr = TASK_UNMAPPED_BASE;
19111 - mm->cached_hole_size = 0;
19112 + start_addr = mm->mmap_base;
19113 + mm->cached_hole_size = 0;
19114 }
19115
19116 full_search:
19117 @@ -280,26 +287,27 @@ full_search:
19118
19119 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19120 /* At this point: (!vma || addr < vma->vm_end). */
19121 - if (TASK_SIZE - len < addr) {
19122 + if (pax_task_size - len < addr) {
19123 /*
19124 * Start a new search - just in case we missed
19125 * some holes.
19126 */
19127 - if (start_addr != TASK_UNMAPPED_BASE) {
19128 - start_addr = TASK_UNMAPPED_BASE;
19129 + if (start_addr != mm->mmap_base) {
19130 + start_addr = mm->mmap_base;
19131 mm->cached_hole_size = 0;
19132 goto full_search;
19133 }
19134 return -ENOMEM;
19135 }
19136 - if (!vma || addr + len <= vma->vm_start) {
19137 - mm->free_area_cache = addr + len;
19138 - return addr;
19139 - }
19140 + if (check_heap_stack_gap(vma, addr, len))
19141 + break;
19142 if (addr + mm->cached_hole_size < vma->vm_start)
19143 mm->cached_hole_size = vma->vm_start - addr;
19144 addr = ALIGN(vma->vm_end, huge_page_size(h));
19145 }
19146 +
19147 + mm->free_area_cache = addr + len;
19148 + return addr;
19149 }
19150
19151 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19152 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19153 {
19154 struct hstate *h = hstate_file(file);
19155 struct mm_struct *mm = current->mm;
19156 - struct vm_area_struct *vma, *prev_vma;
19157 - unsigned long base = mm->mmap_base, addr = addr0;
19158 + struct vm_area_struct *vma;
19159 + unsigned long base = mm->mmap_base, addr;
19160 unsigned long largest_hole = mm->cached_hole_size;
19161 - int first_time = 1;
19162
19163 /* don't allow allocations above current base */
19164 if (mm->free_area_cache > base)
19165 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19166 largest_hole = 0;
19167 mm->free_area_cache = base;
19168 }
19169 -try_again:
19170 +
19171 /* make sure it can fit in the remaining address space */
19172 if (mm->free_area_cache < len)
19173 goto fail;
19174
19175 /* either no address requested or can't fit in requested address hole */
19176 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19177 + addr = (mm->free_area_cache - len);
19178 do {
19179 + addr &= huge_page_mask(h);
19180 + vma = find_vma(mm, addr);
19181 /*
19182 * Lookup failure means no vma is above this address,
19183 * i.e. return with success:
19184 - */
19185 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19186 - return addr;
19187 -
19188 - /*
19189 * new region fits between prev_vma->vm_end and
19190 * vma->vm_start, use it:
19191 */
19192 - if (addr + len <= vma->vm_start &&
19193 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19194 + if (check_heap_stack_gap(vma, addr, len)) {
19195 /* remember the address as a hint for next time */
19196 - mm->cached_hole_size = largest_hole;
19197 - return (mm->free_area_cache = addr);
19198 - } else {
19199 - /* pull free_area_cache down to the first hole */
19200 - if (mm->free_area_cache == vma->vm_end) {
19201 - mm->free_area_cache = vma->vm_start;
19202 - mm->cached_hole_size = largest_hole;
19203 - }
19204 + mm->cached_hole_size = largest_hole;
19205 + return (mm->free_area_cache = addr);
19206 + }
19207 + /* pull free_area_cache down to the first hole */
19208 + if (mm->free_area_cache == vma->vm_end) {
19209 + mm->free_area_cache = vma->vm_start;
19210 + mm->cached_hole_size = largest_hole;
19211 }
19212
19213 /* remember the largest hole we saw so far */
19214 if (addr + largest_hole < vma->vm_start)
19215 - largest_hole = vma->vm_start - addr;
19216 + largest_hole = vma->vm_start - addr;
19217
19218 /* try just below the current vma->vm_start */
19219 - addr = (vma->vm_start - len) & huge_page_mask(h);
19220 - } while (len <= vma->vm_start);
19221 + addr = skip_heap_stack_gap(vma, len);
19222 + } while (!IS_ERR_VALUE(addr));
19223
19224 fail:
19225 /*
19226 - * if hint left us with no space for the requested
19227 - * mapping then try again:
19228 - */
19229 - if (first_time) {
19230 - mm->free_area_cache = base;
19231 - largest_hole = 0;
19232 - first_time = 0;
19233 - goto try_again;
19234 - }
19235 - /*
19236 * A failed mmap() very likely causes application failure,
19237 * so fall back to the bottom-up function here. This scenario
19238 * can happen with large stack limits and large mmap()
19239 * allocations.
19240 */
19241 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19242 +
19243 +#ifdef CONFIG_PAX_SEGMEXEC
19244 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19245 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19246 + else
19247 +#endif
19248 +
19249 + mm->mmap_base = TASK_UNMAPPED_BASE;
19250 +
19251 +#ifdef CONFIG_PAX_RANDMMAP
19252 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19253 + mm->mmap_base += mm->delta_mmap;
19254 +#endif
19255 +
19256 + mm->free_area_cache = mm->mmap_base;
19257 mm->cached_hole_size = ~0UL;
19258 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19259 len, pgoff, flags);
19260 @@ -386,6 +392,7 @@ fail:
19261 /*
19262 * Restore the topdown base:
19263 */
19264 + mm->mmap_base = base;
19265 mm->free_area_cache = base;
19266 mm->cached_hole_size = ~0UL;
19267
19268 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19269 struct hstate *h = hstate_file(file);
19270 struct mm_struct *mm = current->mm;
19271 struct vm_area_struct *vma;
19272 + unsigned long pax_task_size = TASK_SIZE;
19273
19274 if (len & ~huge_page_mask(h))
19275 return -EINVAL;
19276 - if (len > TASK_SIZE)
19277 +
19278 +#ifdef CONFIG_PAX_SEGMEXEC
19279 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19280 + pax_task_size = SEGMEXEC_TASK_SIZE;
19281 +#endif
19282 +
19283 + pax_task_size -= PAGE_SIZE;
19284 +
19285 + if (len > pax_task_size)
19286 return -ENOMEM;
19287
19288 if (flags & MAP_FIXED) {
19289 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19290 if (addr) {
19291 addr = ALIGN(addr, huge_page_size(h));
19292 vma = find_vma(mm, addr);
19293 - if (TASK_SIZE - len >= addr &&
19294 - (!vma || addr + len <= vma->vm_start))
19295 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19296 return addr;
19297 }
19298 if (mm->get_unmapped_area == arch_get_unmapped_area)
19299 diff -urNp linux-3.0.3/arch/x86/mm/init_32.c linux-3.0.3/arch/x86/mm/init_32.c
19300 --- linux-3.0.3/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19301 +++ linux-3.0.3/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19302 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19303 }
19304
19305 /*
19306 - * Creates a middle page table and puts a pointer to it in the
19307 - * given global directory entry. This only returns the gd entry
19308 - * in non-PAE compilation mode, since the middle layer is folded.
19309 - */
19310 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19311 -{
19312 - pud_t *pud;
19313 - pmd_t *pmd_table;
19314 -
19315 -#ifdef CONFIG_X86_PAE
19316 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19317 - if (after_bootmem)
19318 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19319 - else
19320 - pmd_table = (pmd_t *)alloc_low_page();
19321 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19322 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19323 - pud = pud_offset(pgd, 0);
19324 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19325 -
19326 - return pmd_table;
19327 - }
19328 -#endif
19329 - pud = pud_offset(pgd, 0);
19330 - pmd_table = pmd_offset(pud, 0);
19331 -
19332 - return pmd_table;
19333 -}
19334 -
19335 -/*
19336 * Create a page table and place a pointer to it in a middle page
19337 * directory entry:
19338 */
19339 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19340 page_table = (pte_t *)alloc_low_page();
19341
19342 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19343 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19344 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19345 +#else
19346 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19347 +#endif
19348 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19349 }
19350
19351 return pte_offset_kernel(pmd, 0);
19352 }
19353
19354 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19355 +{
19356 + pud_t *pud;
19357 + pmd_t *pmd_table;
19358 +
19359 + pud = pud_offset(pgd, 0);
19360 + pmd_table = pmd_offset(pud, 0);
19361 +
19362 + return pmd_table;
19363 +}
19364 +
19365 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19366 {
19367 int pgd_idx = pgd_index(vaddr);
19368 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19369 int pgd_idx, pmd_idx;
19370 unsigned long vaddr;
19371 pgd_t *pgd;
19372 + pud_t *pud;
19373 pmd_t *pmd;
19374 pte_t *pte = NULL;
19375
19376 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19377 pgd = pgd_base + pgd_idx;
19378
19379 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19380 - pmd = one_md_table_init(pgd);
19381 - pmd = pmd + pmd_index(vaddr);
19382 + pud = pud_offset(pgd, vaddr);
19383 + pmd = pmd_offset(pud, vaddr);
19384 +
19385 +#ifdef CONFIG_X86_PAE
19386 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19387 +#endif
19388 +
19389 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19390 pmd++, pmd_idx++) {
19391 pte = page_table_kmap_check(one_page_table_init(pmd),
19392 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19393 }
19394 }
19395
19396 -static inline int is_kernel_text(unsigned long addr)
19397 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19398 {
19399 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19400 - return 1;
19401 - return 0;
19402 + if ((start > ktla_ktva((unsigned long)_etext) ||
19403 + end <= ktla_ktva((unsigned long)_stext)) &&
19404 + (start > ktla_ktva((unsigned long)_einittext) ||
19405 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19406 +
19407 +#ifdef CONFIG_ACPI_SLEEP
19408 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19409 +#endif
19410 +
19411 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19412 + return 0;
19413 + return 1;
19414 }
19415
19416 /*
19417 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19418 unsigned long last_map_addr = end;
19419 unsigned long start_pfn, end_pfn;
19420 pgd_t *pgd_base = swapper_pg_dir;
19421 - int pgd_idx, pmd_idx, pte_ofs;
19422 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19423 unsigned long pfn;
19424 pgd_t *pgd;
19425 + pud_t *pud;
19426 pmd_t *pmd;
19427 pte_t *pte;
19428 unsigned pages_2m, pages_4k;
19429 @@ -281,8 +282,13 @@ repeat:
19430 pfn = start_pfn;
19431 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19432 pgd = pgd_base + pgd_idx;
19433 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19434 - pmd = one_md_table_init(pgd);
19435 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19436 + pud = pud_offset(pgd, 0);
19437 + pmd = pmd_offset(pud, 0);
19438 +
19439 +#ifdef CONFIG_X86_PAE
19440 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19441 +#endif
19442
19443 if (pfn >= end_pfn)
19444 continue;
19445 @@ -294,14 +300,13 @@ repeat:
19446 #endif
19447 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19448 pmd++, pmd_idx++) {
19449 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19450 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19451
19452 /*
19453 * Map with big pages if possible, otherwise
19454 * create normal page tables:
19455 */
19456 if (use_pse) {
19457 - unsigned int addr2;
19458 pgprot_t prot = PAGE_KERNEL_LARGE;
19459 /*
19460 * first pass will use the same initial
19461 @@ -311,11 +316,7 @@ repeat:
19462 __pgprot(PTE_IDENT_ATTR |
19463 _PAGE_PSE);
19464
19465 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19466 - PAGE_OFFSET + PAGE_SIZE-1;
19467 -
19468 - if (is_kernel_text(addr) ||
19469 - is_kernel_text(addr2))
19470 + if (is_kernel_text(address, address + PMD_SIZE))
19471 prot = PAGE_KERNEL_LARGE_EXEC;
19472
19473 pages_2m++;
19474 @@ -332,7 +333,7 @@ repeat:
19475 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19476 pte += pte_ofs;
19477 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19478 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19479 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19480 pgprot_t prot = PAGE_KERNEL;
19481 /*
19482 * first pass will use the same initial
19483 @@ -340,7 +341,7 @@ repeat:
19484 */
19485 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19486
19487 - if (is_kernel_text(addr))
19488 + if (is_kernel_text(address, address + PAGE_SIZE))
19489 prot = PAGE_KERNEL_EXEC;
19490
19491 pages_4k++;
19492 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19493
19494 pud = pud_offset(pgd, va);
19495 pmd = pmd_offset(pud, va);
19496 - if (!pmd_present(*pmd))
19497 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19498 break;
19499
19500 pte = pte_offset_kernel(pmd, va);
19501 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19502
19503 static void __init pagetable_init(void)
19504 {
19505 - pgd_t *pgd_base = swapper_pg_dir;
19506 -
19507 - permanent_kmaps_init(pgd_base);
19508 + permanent_kmaps_init(swapper_pg_dir);
19509 }
19510
19511 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19512 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19513 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19514
19515 /* user-defined highmem size */
19516 @@ -757,6 +756,12 @@ void __init mem_init(void)
19517
19518 pci_iommu_alloc();
19519
19520 +#ifdef CONFIG_PAX_PER_CPU_PGD
19521 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19522 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19523 + KERNEL_PGD_PTRS);
19524 +#endif
19525 +
19526 #ifdef CONFIG_FLATMEM
19527 BUG_ON(!mem_map);
19528 #endif
19529 @@ -774,7 +779,7 @@ void __init mem_init(void)
19530 set_highmem_pages_init();
19531
19532 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19533 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19534 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19535 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19536
19537 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19538 @@ -815,10 +820,10 @@ void __init mem_init(void)
19539 ((unsigned long)&__init_end -
19540 (unsigned long)&__init_begin) >> 10,
19541
19542 - (unsigned long)&_etext, (unsigned long)&_edata,
19543 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19544 + (unsigned long)&_sdata, (unsigned long)&_edata,
19545 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19546
19547 - (unsigned long)&_text, (unsigned long)&_etext,
19548 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19549 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19550
19551 /*
19552 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19553 if (!kernel_set_to_readonly)
19554 return;
19555
19556 + start = ktla_ktva(start);
19557 pr_debug("Set kernel text: %lx - %lx for read write\n",
19558 start, start+size);
19559
19560 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19561 if (!kernel_set_to_readonly)
19562 return;
19563
19564 + start = ktla_ktva(start);
19565 pr_debug("Set kernel text: %lx - %lx for read only\n",
19566 start, start+size);
19567
19568 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19569 unsigned long start = PFN_ALIGN(_text);
19570 unsigned long size = PFN_ALIGN(_etext) - start;
19571
19572 + start = ktla_ktva(start);
19573 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19574 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19575 size >> 10);
19576 diff -urNp linux-3.0.3/arch/x86/mm/init_64.c linux-3.0.3/arch/x86/mm/init_64.c
19577 --- linux-3.0.3/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19578 +++ linux-3.0.3/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19579 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19580 * around without checking the pgd every time.
19581 */
19582
19583 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19584 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19585 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19586
19587 int force_personality32;
19588 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19589
19590 for (address = start; address <= end; address += PGDIR_SIZE) {
19591 const pgd_t *pgd_ref = pgd_offset_k(address);
19592 +
19593 +#ifdef CONFIG_PAX_PER_CPU_PGD
19594 + unsigned long cpu;
19595 +#else
19596 struct page *page;
19597 +#endif
19598
19599 if (pgd_none(*pgd_ref))
19600 continue;
19601
19602 spin_lock(&pgd_lock);
19603 +
19604 +#ifdef CONFIG_PAX_PER_CPU_PGD
19605 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19606 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19607 +#else
19608 list_for_each_entry(page, &pgd_list, lru) {
19609 pgd_t *pgd;
19610 spinlock_t *pgt_lock;
19611 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19612 /* the pgt_lock only for Xen */
19613 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19614 spin_lock(pgt_lock);
19615 +#endif
19616
19617 if (pgd_none(*pgd))
19618 set_pgd(pgd, *pgd_ref);
19619 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19620 BUG_ON(pgd_page_vaddr(*pgd)
19621 != pgd_page_vaddr(*pgd_ref));
19622
19623 +#ifndef CONFIG_PAX_PER_CPU_PGD
19624 spin_unlock(pgt_lock);
19625 +#endif
19626 +
19627 }
19628 spin_unlock(&pgd_lock);
19629 }
19630 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19631 pmd = fill_pmd(pud, vaddr);
19632 pte = fill_pte(pmd, vaddr);
19633
19634 + pax_open_kernel();
19635 set_pte(pte, new_pte);
19636 + pax_close_kernel();
19637
19638 /*
19639 * It's enough to flush this one mapping.
19640 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19641 pgd = pgd_offset_k((unsigned long)__va(phys));
19642 if (pgd_none(*pgd)) {
19643 pud = (pud_t *) spp_getpage();
19644 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19645 - _PAGE_USER));
19646 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19647 }
19648 pud = pud_offset(pgd, (unsigned long)__va(phys));
19649 if (pud_none(*pud)) {
19650 pmd = (pmd_t *) spp_getpage();
19651 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19652 - _PAGE_USER));
19653 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19654 }
19655 pmd = pmd_offset(pud, phys);
19656 BUG_ON(!pmd_none(*pmd));
19657 @@ -693,6 +707,12 @@ void __init mem_init(void)
19658
19659 pci_iommu_alloc();
19660
19661 +#ifdef CONFIG_PAX_PER_CPU_PGD
19662 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19663 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19664 + KERNEL_PGD_PTRS);
19665 +#endif
19666 +
19667 /* clear_bss() already clear the empty_zero_page */
19668
19669 reservedpages = 0;
19670 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19671 static struct vm_area_struct gate_vma = {
19672 .vm_start = VSYSCALL_START,
19673 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19674 - .vm_page_prot = PAGE_READONLY_EXEC,
19675 - .vm_flags = VM_READ | VM_EXEC
19676 + .vm_page_prot = PAGE_READONLY,
19677 + .vm_flags = VM_READ
19678 };
19679
19680 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19681 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19682
19683 const char *arch_vma_name(struct vm_area_struct *vma)
19684 {
19685 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19686 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19687 return "[vdso]";
19688 if (vma == &gate_vma)
19689 return "[vsyscall]";
19690 diff -urNp linux-3.0.3/arch/x86/mm/init.c linux-3.0.3/arch/x86/mm/init.c
19691 --- linux-3.0.3/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19692 +++ linux-3.0.3/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19693 @@ -31,7 +31,7 @@ int direct_gbpages
19694 static void __init find_early_table_space(unsigned long end, int use_pse,
19695 int use_gbpages)
19696 {
19697 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19698 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19699 phys_addr_t base;
19700
19701 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19702 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19703 */
19704 int devmem_is_allowed(unsigned long pagenr)
19705 {
19706 - if (pagenr <= 256)
19707 +#ifdef CONFIG_GRKERNSEC_KMEM
19708 + /* allow BDA */
19709 + if (!pagenr)
19710 + return 1;
19711 + /* allow EBDA */
19712 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19713 + return 1;
19714 +#else
19715 + if (!pagenr)
19716 + return 1;
19717 +#ifdef CONFIG_VM86
19718 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19719 + return 1;
19720 +#endif
19721 +#endif
19722 +
19723 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19724 return 1;
19725 +#ifdef CONFIG_GRKERNSEC_KMEM
19726 + /* throw out everything else below 1MB */
19727 + if (pagenr <= 256)
19728 + return 0;
19729 +#endif
19730 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19731 return 0;
19732 if (!page_is_ram(pagenr))
19733 return 1;
19734 +
19735 return 0;
19736 }
19737
19738 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19739
19740 void free_initmem(void)
19741 {
19742 +
19743 +#ifdef CONFIG_PAX_KERNEXEC
19744 +#ifdef CONFIG_X86_32
19745 + /* PaX: limit KERNEL_CS to actual size */
19746 + unsigned long addr, limit;
19747 + struct desc_struct d;
19748 + int cpu;
19749 +
19750 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19751 + limit = (limit - 1UL) >> PAGE_SHIFT;
19752 +
19753 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19754 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19755 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19756 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19757 + }
19758 +
19759 + /* PaX: make KERNEL_CS read-only */
19760 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19761 + if (!paravirt_enabled())
19762 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19763 +/*
19764 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19765 + pgd = pgd_offset_k(addr);
19766 + pud = pud_offset(pgd, addr);
19767 + pmd = pmd_offset(pud, addr);
19768 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19769 + }
19770 +*/
19771 +#ifdef CONFIG_X86_PAE
19772 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19773 +/*
19774 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19775 + pgd = pgd_offset_k(addr);
19776 + pud = pud_offset(pgd, addr);
19777 + pmd = pmd_offset(pud, addr);
19778 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19779 + }
19780 +*/
19781 +#endif
19782 +
19783 +#ifdef CONFIG_MODULES
19784 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19785 +#endif
19786 +
19787 +#else
19788 + pgd_t *pgd;
19789 + pud_t *pud;
19790 + pmd_t *pmd;
19791 + unsigned long addr, end;
19792 +
19793 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19794 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19795 + pgd = pgd_offset_k(addr);
19796 + pud = pud_offset(pgd, addr);
19797 + pmd = pmd_offset(pud, addr);
19798 + if (!pmd_present(*pmd))
19799 + continue;
19800 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19801 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19802 + else
19803 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19804 + }
19805 +
19806 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19807 + end = addr + KERNEL_IMAGE_SIZE;
19808 + for (; addr < end; addr += PMD_SIZE) {
19809 + pgd = pgd_offset_k(addr);
19810 + pud = pud_offset(pgd, addr);
19811 + pmd = pmd_offset(pud, addr);
19812 + if (!pmd_present(*pmd))
19813 + continue;
19814 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19815 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19816 + }
19817 +#endif
19818 +
19819 + flush_tlb_all();
19820 +#endif
19821 +
19822 free_init_pages("unused kernel memory",
19823 (unsigned long)(&__init_begin),
19824 (unsigned long)(&__init_end));
19825 diff -urNp linux-3.0.3/arch/x86/mm/iomap_32.c linux-3.0.3/arch/x86/mm/iomap_32.c
19826 --- linux-3.0.3/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19827 +++ linux-3.0.3/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19828 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19829 type = kmap_atomic_idx_push();
19830 idx = type + KM_TYPE_NR * smp_processor_id();
19831 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19832 +
19833 + pax_open_kernel();
19834 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19835 + pax_close_kernel();
19836 +
19837 arch_flush_lazy_mmu_mode();
19838
19839 return (void *)vaddr;
19840 diff -urNp linux-3.0.3/arch/x86/mm/ioremap.c linux-3.0.3/arch/x86/mm/ioremap.c
19841 --- linux-3.0.3/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19842 +++ linux-3.0.3/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19843 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19844 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19845 int is_ram = page_is_ram(pfn);
19846
19847 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19848 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19849 return NULL;
19850 WARN_ON_ONCE(is_ram);
19851 }
19852 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19853 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19854
19855 static __initdata int after_paging_init;
19856 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19857 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19858
19859 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19860 {
19861 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19862 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19863
19864 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19865 - memset(bm_pte, 0, sizeof(bm_pte));
19866 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
19867 + pmd_populate_user(&init_mm, pmd, bm_pte);
19868
19869 /*
19870 * The boot-ioremap range spans multiple pmds, for which
19871 diff -urNp linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c
19872 --- linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19873 +++ linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19874 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19875 * memory (e.g. tracked pages)? For now, we need this to avoid
19876 * invoking kmemcheck for PnP BIOS calls.
19877 */
19878 - if (regs->flags & X86_VM_MASK)
19879 + if (v8086_mode(regs))
19880 return false;
19881 - if (regs->cs != __KERNEL_CS)
19882 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19883 return false;
19884
19885 pte = kmemcheck_pte_lookup(address);
19886 diff -urNp linux-3.0.3/arch/x86/mm/mmap.c linux-3.0.3/arch/x86/mm/mmap.c
19887 --- linux-3.0.3/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19888 +++ linux-3.0.3/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19889 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19890 * Leave an at least ~128 MB hole with possible stack randomization.
19891 */
19892 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19893 -#define MAX_GAP (TASK_SIZE/6*5)
19894 +#define MAX_GAP (pax_task_size/6*5)
19895
19896 /*
19897 * True on X86_32 or when emulating IA32 on X86_64
19898 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19899 return rnd << PAGE_SHIFT;
19900 }
19901
19902 -static unsigned long mmap_base(void)
19903 +static unsigned long mmap_base(struct mm_struct *mm)
19904 {
19905 unsigned long gap = rlimit(RLIMIT_STACK);
19906 + unsigned long pax_task_size = TASK_SIZE;
19907 +
19908 +#ifdef CONFIG_PAX_SEGMEXEC
19909 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19910 + pax_task_size = SEGMEXEC_TASK_SIZE;
19911 +#endif
19912
19913 if (gap < MIN_GAP)
19914 gap = MIN_GAP;
19915 else if (gap > MAX_GAP)
19916 gap = MAX_GAP;
19917
19918 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19919 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19920 }
19921
19922 /*
19923 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19924 * does, but not when emulating X86_32
19925 */
19926 -static unsigned long mmap_legacy_base(void)
19927 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19928 {
19929 - if (mmap_is_ia32())
19930 + if (mmap_is_ia32()) {
19931 +
19932 +#ifdef CONFIG_PAX_SEGMEXEC
19933 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19934 + return SEGMEXEC_TASK_UNMAPPED_BASE;
19935 + else
19936 +#endif
19937 +
19938 return TASK_UNMAPPED_BASE;
19939 - else
19940 + } else
19941 return TASK_UNMAPPED_BASE + mmap_rnd();
19942 }
19943
19944 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19945 void arch_pick_mmap_layout(struct mm_struct *mm)
19946 {
19947 if (mmap_is_legacy()) {
19948 - mm->mmap_base = mmap_legacy_base();
19949 + mm->mmap_base = mmap_legacy_base(mm);
19950 +
19951 +#ifdef CONFIG_PAX_RANDMMAP
19952 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19953 + mm->mmap_base += mm->delta_mmap;
19954 +#endif
19955 +
19956 mm->get_unmapped_area = arch_get_unmapped_area;
19957 mm->unmap_area = arch_unmap_area;
19958 } else {
19959 - mm->mmap_base = mmap_base();
19960 + mm->mmap_base = mmap_base(mm);
19961 +
19962 +#ifdef CONFIG_PAX_RANDMMAP
19963 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19964 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19965 +#endif
19966 +
19967 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19968 mm->unmap_area = arch_unmap_area_topdown;
19969 }
19970 diff -urNp linux-3.0.3/arch/x86/mm/mmio-mod.c linux-3.0.3/arch/x86/mm/mmio-mod.c
19971 --- linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19972 +++ linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19973 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19974 break;
19975 default:
19976 {
19977 - unsigned char *ip = (unsigned char *)instptr;
19978 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19979 my_trace->opcode = MMIO_UNKNOWN_OP;
19980 my_trace->width = 0;
19981 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19982 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19983 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19984 void __iomem *addr)
19985 {
19986 - static atomic_t next_id;
19987 + static atomic_unchecked_t next_id;
19988 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
19989 /* These are page-unaligned. */
19990 struct mmiotrace_map map = {
19991 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
19992 .private = trace
19993 },
19994 .phys = offset,
19995 - .id = atomic_inc_return(&next_id)
19996 + .id = atomic_inc_return_unchecked(&next_id)
19997 };
19998 map.map_id = trace->id;
19999
20000 diff -urNp linux-3.0.3/arch/x86/mm/pageattr.c linux-3.0.3/arch/x86/mm/pageattr.c
20001 --- linux-3.0.3/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20002 +++ linux-3.0.3/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20003 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20004 */
20005 #ifdef CONFIG_PCI_BIOS
20006 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20007 - pgprot_val(forbidden) |= _PAGE_NX;
20008 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20009 #endif
20010
20011 /*
20012 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20013 * Does not cover __inittext since that is gone later on. On
20014 * 64bit we do not enforce !NX on the low mapping
20015 */
20016 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20017 - pgprot_val(forbidden) |= _PAGE_NX;
20018 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20019 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20020
20021 +#ifdef CONFIG_DEBUG_RODATA
20022 /*
20023 * The .rodata section needs to be read-only. Using the pfn
20024 * catches all aliases.
20025 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20026 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20027 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20028 pgprot_val(forbidden) |= _PAGE_RW;
20029 +#endif
20030
20031 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20032 /*
20033 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20034 }
20035 #endif
20036
20037 +#ifdef CONFIG_PAX_KERNEXEC
20038 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20039 + pgprot_val(forbidden) |= _PAGE_RW;
20040 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20041 + }
20042 +#endif
20043 +
20044 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20045
20046 return prot;
20047 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20048 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20049 {
20050 /* change init_mm */
20051 + pax_open_kernel();
20052 set_pte_atomic(kpte, pte);
20053 +
20054 #ifdef CONFIG_X86_32
20055 if (!SHARED_KERNEL_PMD) {
20056 +
20057 +#ifdef CONFIG_PAX_PER_CPU_PGD
20058 + unsigned long cpu;
20059 +#else
20060 struct page *page;
20061 +#endif
20062
20063 +#ifdef CONFIG_PAX_PER_CPU_PGD
20064 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20065 + pgd_t *pgd = get_cpu_pgd(cpu);
20066 +#else
20067 list_for_each_entry(page, &pgd_list, lru) {
20068 - pgd_t *pgd;
20069 + pgd_t *pgd = (pgd_t *)page_address(page);
20070 +#endif
20071 +
20072 pud_t *pud;
20073 pmd_t *pmd;
20074
20075 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20076 + pgd += pgd_index(address);
20077 pud = pud_offset(pgd, address);
20078 pmd = pmd_offset(pud, address);
20079 set_pte_atomic((pte_t *)pmd, pte);
20080 }
20081 }
20082 #endif
20083 + pax_close_kernel();
20084 }
20085
20086 static int
20087 diff -urNp linux-3.0.3/arch/x86/mm/pageattr-test.c linux-3.0.3/arch/x86/mm/pageattr-test.c
20088 --- linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20089 +++ linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20090 @@ -36,7 +36,7 @@ enum {
20091
20092 static int pte_testbit(pte_t pte)
20093 {
20094 - return pte_flags(pte) & _PAGE_UNUSED1;
20095 + return pte_flags(pte) & _PAGE_CPA_TEST;
20096 }
20097
20098 struct split_state {
20099 diff -urNp linux-3.0.3/arch/x86/mm/pat.c linux-3.0.3/arch/x86/mm/pat.c
20100 --- linux-3.0.3/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20101 +++ linux-3.0.3/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20102 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20103
20104 if (!entry) {
20105 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20106 - current->comm, current->pid, start, end);
20107 + current->comm, task_pid_nr(current), start, end);
20108 return -EINVAL;
20109 }
20110
20111 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20112 while (cursor < to) {
20113 if (!devmem_is_allowed(pfn)) {
20114 printk(KERN_INFO
20115 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20116 - current->comm, from, to);
20117 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20118 + current->comm, from, to, cursor);
20119 return 0;
20120 }
20121 cursor += PAGE_SIZE;
20122 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20123 printk(KERN_INFO
20124 "%s:%d ioremap_change_attr failed %s "
20125 "for %Lx-%Lx\n",
20126 - current->comm, current->pid,
20127 + current->comm, task_pid_nr(current),
20128 cattr_name(flags),
20129 base, (unsigned long long)(base + size));
20130 return -EINVAL;
20131 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20132 if (want_flags != flags) {
20133 printk(KERN_WARNING
20134 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20135 - current->comm, current->pid,
20136 + current->comm, task_pid_nr(current),
20137 cattr_name(want_flags),
20138 (unsigned long long)paddr,
20139 (unsigned long long)(paddr + size),
20140 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20141 free_memtype(paddr, paddr + size);
20142 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20143 " for %Lx-%Lx, got %s\n",
20144 - current->comm, current->pid,
20145 + current->comm, task_pid_nr(current),
20146 cattr_name(want_flags),
20147 (unsigned long long)paddr,
20148 (unsigned long long)(paddr + size),
20149 diff -urNp linux-3.0.3/arch/x86/mm/pf_in.c linux-3.0.3/arch/x86/mm/pf_in.c
20150 --- linux-3.0.3/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20151 +++ linux-3.0.3/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20152 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20153 int i;
20154 enum reason_type rv = OTHERS;
20155
20156 - p = (unsigned char *)ins_addr;
20157 + p = (unsigned char *)ktla_ktva(ins_addr);
20158 p += skip_prefix(p, &prf);
20159 p += get_opcode(p, &opcode);
20160
20161 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20162 struct prefix_bits prf;
20163 int i;
20164
20165 - p = (unsigned char *)ins_addr;
20166 + p = (unsigned char *)ktla_ktva(ins_addr);
20167 p += skip_prefix(p, &prf);
20168 p += get_opcode(p, &opcode);
20169
20170 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20171 struct prefix_bits prf;
20172 int i;
20173
20174 - p = (unsigned char *)ins_addr;
20175 + p = (unsigned char *)ktla_ktva(ins_addr);
20176 p += skip_prefix(p, &prf);
20177 p += get_opcode(p, &opcode);
20178
20179 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20180 struct prefix_bits prf;
20181 int i;
20182
20183 - p = (unsigned char *)ins_addr;
20184 + p = (unsigned char *)ktla_ktva(ins_addr);
20185 p += skip_prefix(p, &prf);
20186 p += get_opcode(p, &opcode);
20187 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20188 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20189 struct prefix_bits prf;
20190 int i;
20191
20192 - p = (unsigned char *)ins_addr;
20193 + p = (unsigned char *)ktla_ktva(ins_addr);
20194 p += skip_prefix(p, &prf);
20195 p += get_opcode(p, &opcode);
20196 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20197 diff -urNp linux-3.0.3/arch/x86/mm/pgtable_32.c linux-3.0.3/arch/x86/mm/pgtable_32.c
20198 --- linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20199 +++ linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20200 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20201 return;
20202 }
20203 pte = pte_offset_kernel(pmd, vaddr);
20204 +
20205 + pax_open_kernel();
20206 if (pte_val(pteval))
20207 set_pte_at(&init_mm, vaddr, pte, pteval);
20208 else
20209 pte_clear(&init_mm, vaddr, pte);
20210 + pax_close_kernel();
20211
20212 /*
20213 * It's enough to flush this one mapping.
20214 diff -urNp linux-3.0.3/arch/x86/mm/pgtable.c linux-3.0.3/arch/x86/mm/pgtable.c
20215 --- linux-3.0.3/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20216 +++ linux-3.0.3/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20217 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20218 list_del(&page->lru);
20219 }
20220
20221 -#define UNSHARED_PTRS_PER_PGD \
20222 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20223 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20224 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20225
20226 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20227 +{
20228 + while (count--)
20229 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20230 +}
20231 +#endif
20232 +
20233 +#ifdef CONFIG_PAX_PER_CPU_PGD
20234 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20235 +{
20236 + while (count--)
20237 +
20238 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20239 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20240 +#else
20241 + *dst++ = *src++;
20242 +#endif
20243
20244 +}
20245 +#endif
20246 +
20247 +#ifdef CONFIG_X86_64
20248 +#define pxd_t pud_t
20249 +#define pyd_t pgd_t
20250 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20251 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20252 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20253 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20254 +#define PYD_SIZE PGDIR_SIZE
20255 +#else
20256 +#define pxd_t pmd_t
20257 +#define pyd_t pud_t
20258 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20259 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20260 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20261 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20262 +#define PYD_SIZE PUD_SIZE
20263 +#endif
20264 +
20265 +#ifdef CONFIG_PAX_PER_CPU_PGD
20266 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20267 +static inline void pgd_dtor(pgd_t *pgd) {}
20268 +#else
20269 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20270 {
20271 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20272 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20273 pgd_list_del(pgd);
20274 spin_unlock(&pgd_lock);
20275 }
20276 +#endif
20277
20278 /*
20279 * List of all pgd's needed for non-PAE so it can invalidate entries
20280 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20281 * -- wli
20282 */
20283
20284 -#ifdef CONFIG_X86_PAE
20285 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20286 /*
20287 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20288 * updating the top-level pagetable entries to guarantee the
20289 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20290 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20291 * and initialize the kernel pmds here.
20292 */
20293 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20294 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20295
20296 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20297 {
20298 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20299 */
20300 flush_tlb_mm(mm);
20301 }
20302 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20303 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20304 #else /* !CONFIG_X86_PAE */
20305
20306 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20307 -#define PREALLOCATED_PMDS 0
20308 +#define PREALLOCATED_PXDS 0
20309
20310 #endif /* CONFIG_X86_PAE */
20311
20312 -static void free_pmds(pmd_t *pmds[])
20313 +static void free_pxds(pxd_t *pxds[])
20314 {
20315 int i;
20316
20317 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20318 - if (pmds[i])
20319 - free_page((unsigned long)pmds[i]);
20320 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20321 + if (pxds[i])
20322 + free_page((unsigned long)pxds[i]);
20323 }
20324
20325 -static int preallocate_pmds(pmd_t *pmds[])
20326 +static int preallocate_pxds(pxd_t *pxds[])
20327 {
20328 int i;
20329 bool failed = false;
20330
20331 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20332 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20333 - if (pmd == NULL)
20334 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20335 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20336 + if (pxd == NULL)
20337 failed = true;
20338 - pmds[i] = pmd;
20339 + pxds[i] = pxd;
20340 }
20341
20342 if (failed) {
20343 - free_pmds(pmds);
20344 + free_pxds(pxds);
20345 return -ENOMEM;
20346 }
20347
20348 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20349 * preallocate which never got a corresponding vma will need to be
20350 * freed manually.
20351 */
20352 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20353 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20354 {
20355 int i;
20356
20357 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20358 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20359 pgd_t pgd = pgdp[i];
20360
20361 if (pgd_val(pgd) != 0) {
20362 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20363 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20364
20365 - pgdp[i] = native_make_pgd(0);
20366 + set_pgd(pgdp + i, native_make_pgd(0));
20367
20368 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20369 - pmd_free(mm, pmd);
20370 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20371 + pxd_free(mm, pxd);
20372 }
20373 }
20374 }
20375
20376 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20377 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20378 {
20379 - pud_t *pud;
20380 + pyd_t *pyd;
20381 unsigned long addr;
20382 int i;
20383
20384 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20385 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20386 return;
20387
20388 - pud = pud_offset(pgd, 0);
20389 +#ifdef CONFIG_X86_64
20390 + pyd = pyd_offset(mm, 0L);
20391 +#else
20392 + pyd = pyd_offset(pgd, 0L);
20393 +#endif
20394
20395 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20396 - i++, pud++, addr += PUD_SIZE) {
20397 - pmd_t *pmd = pmds[i];
20398 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20399 + i++, pyd++, addr += PYD_SIZE) {
20400 + pxd_t *pxd = pxds[i];
20401
20402 if (i >= KERNEL_PGD_BOUNDARY)
20403 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20404 - sizeof(pmd_t) * PTRS_PER_PMD);
20405 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20406 + sizeof(pxd_t) * PTRS_PER_PMD);
20407
20408 - pud_populate(mm, pud, pmd);
20409 + pyd_populate(mm, pyd, pxd);
20410 }
20411 }
20412
20413 pgd_t *pgd_alloc(struct mm_struct *mm)
20414 {
20415 pgd_t *pgd;
20416 - pmd_t *pmds[PREALLOCATED_PMDS];
20417 + pxd_t *pxds[PREALLOCATED_PXDS];
20418
20419 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20420
20421 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20422
20423 mm->pgd = pgd;
20424
20425 - if (preallocate_pmds(pmds) != 0)
20426 + if (preallocate_pxds(pxds) != 0)
20427 goto out_free_pgd;
20428
20429 if (paravirt_pgd_alloc(mm) != 0)
20430 - goto out_free_pmds;
20431 + goto out_free_pxds;
20432
20433 /*
20434 * Make sure that pre-populating the pmds is atomic with
20435 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20436 spin_lock(&pgd_lock);
20437
20438 pgd_ctor(mm, pgd);
20439 - pgd_prepopulate_pmd(mm, pgd, pmds);
20440 + pgd_prepopulate_pxd(mm, pgd, pxds);
20441
20442 spin_unlock(&pgd_lock);
20443
20444 return pgd;
20445
20446 -out_free_pmds:
20447 - free_pmds(pmds);
20448 +out_free_pxds:
20449 + free_pxds(pxds);
20450 out_free_pgd:
20451 free_page((unsigned long)pgd);
20452 out:
20453 @@ -295,7 +344,7 @@ out:
20454
20455 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20456 {
20457 - pgd_mop_up_pmds(mm, pgd);
20458 + pgd_mop_up_pxds(mm, pgd);
20459 pgd_dtor(pgd);
20460 paravirt_pgd_free(mm, pgd);
20461 free_page((unsigned long)pgd);
20462 diff -urNp linux-3.0.3/arch/x86/mm/setup_nx.c linux-3.0.3/arch/x86/mm/setup_nx.c
20463 --- linux-3.0.3/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20464 +++ linux-3.0.3/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20465 @@ -5,8 +5,10 @@
20466 #include <asm/pgtable.h>
20467 #include <asm/proto.h>
20468
20469 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20470 static int disable_nx __cpuinitdata;
20471
20472 +#ifndef CONFIG_PAX_PAGEEXEC
20473 /*
20474 * noexec = on|off
20475 *
20476 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20477 return 0;
20478 }
20479 early_param("noexec", noexec_setup);
20480 +#endif
20481 +
20482 +#endif
20483
20484 void __cpuinit x86_configure_nx(void)
20485 {
20486 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20487 if (cpu_has_nx && !disable_nx)
20488 __supported_pte_mask |= _PAGE_NX;
20489 else
20490 +#endif
20491 __supported_pte_mask &= ~_PAGE_NX;
20492 }
20493
20494 diff -urNp linux-3.0.3/arch/x86/mm/tlb.c linux-3.0.3/arch/x86/mm/tlb.c
20495 --- linux-3.0.3/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20496 +++ linux-3.0.3/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20497 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20498 BUG();
20499 cpumask_clear_cpu(cpu,
20500 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20501 +
20502 +#ifndef CONFIG_PAX_PER_CPU_PGD
20503 load_cr3(swapper_pg_dir);
20504 +#endif
20505 +
20506 }
20507 EXPORT_SYMBOL_GPL(leave_mm);
20508
20509 diff -urNp linux-3.0.3/arch/x86/net/bpf_jit_comp.c linux-3.0.3/arch/x86/net/bpf_jit_comp.c
20510 --- linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20511 +++ linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20512 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20513 module_free(NULL, image);
20514 return;
20515 }
20516 + pax_open_kernel();
20517 memcpy(image + proglen, temp, ilen);
20518 + pax_close_kernel();
20519 }
20520 proglen += ilen;
20521 addrs[i] = proglen;
20522 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20523 break;
20524 }
20525 if (proglen == oldproglen) {
20526 - image = module_alloc(max_t(unsigned int,
20527 + image = module_alloc_exec(max_t(unsigned int,
20528 proglen,
20529 sizeof(struct work_struct)));
20530 if (!image)
20531 diff -urNp linux-3.0.3/arch/x86/oprofile/backtrace.c linux-3.0.3/arch/x86/oprofile/backtrace.c
20532 --- linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20533 +++ linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20534 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20535 {
20536 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20537
20538 - if (!user_mode_vm(regs)) {
20539 + if (!user_mode(regs)) {
20540 unsigned long stack = kernel_stack_pointer(regs);
20541 if (depth)
20542 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20543 diff -urNp linux-3.0.3/arch/x86/pci/mrst.c linux-3.0.3/arch/x86/pci/mrst.c
20544 --- linux-3.0.3/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20545 +++ linux-3.0.3/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20546 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20547 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20548 pci_mmcfg_late_init();
20549 pcibios_enable_irq = mrst_pci_irq_enable;
20550 - pci_root_ops = pci_mrst_ops;
20551 + pax_open_kernel();
20552 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20553 + pax_close_kernel();
20554 /* Continue with standard init */
20555 return 1;
20556 }
20557 diff -urNp linux-3.0.3/arch/x86/pci/pcbios.c linux-3.0.3/arch/x86/pci/pcbios.c
20558 --- linux-3.0.3/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20559 +++ linux-3.0.3/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20560 @@ -79,50 +79,93 @@ union bios32 {
20561 static struct {
20562 unsigned long address;
20563 unsigned short segment;
20564 -} bios32_indirect = { 0, __KERNEL_CS };
20565 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20566
20567 /*
20568 * Returns the entry point for the given service, NULL on error
20569 */
20570
20571 -static unsigned long bios32_service(unsigned long service)
20572 +static unsigned long __devinit bios32_service(unsigned long service)
20573 {
20574 unsigned char return_code; /* %al */
20575 unsigned long address; /* %ebx */
20576 unsigned long length; /* %ecx */
20577 unsigned long entry; /* %edx */
20578 unsigned long flags;
20579 + struct desc_struct d, *gdt;
20580
20581 local_irq_save(flags);
20582 - __asm__("lcall *(%%edi); cld"
20583 +
20584 + gdt = get_cpu_gdt_table(smp_processor_id());
20585 +
20586 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20587 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20588 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20589 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20590 +
20591 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20592 : "=a" (return_code),
20593 "=b" (address),
20594 "=c" (length),
20595 "=d" (entry)
20596 : "0" (service),
20597 "1" (0),
20598 - "D" (&bios32_indirect));
20599 + "D" (&bios32_indirect),
20600 + "r"(__PCIBIOS_DS)
20601 + : "memory");
20602 +
20603 + pax_open_kernel();
20604 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20605 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20606 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20607 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20608 + pax_close_kernel();
20609 +
20610 local_irq_restore(flags);
20611
20612 switch (return_code) {
20613 - case 0:
20614 - return address + entry;
20615 - case 0x80: /* Not present */
20616 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20617 - return 0;
20618 - default: /* Shouldn't happen */
20619 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20620 - service, return_code);
20621 + case 0: {
20622 + int cpu;
20623 + unsigned char flags;
20624 +
20625 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20626 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20627 + printk(KERN_WARNING "bios32_service: not valid\n");
20628 return 0;
20629 + }
20630 + address = address + PAGE_OFFSET;
20631 + length += 16UL; /* some BIOSs underreport this... */
20632 + flags = 4;
20633 + if (length >= 64*1024*1024) {
20634 + length >>= PAGE_SHIFT;
20635 + flags |= 8;
20636 + }
20637 +
20638 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20639 + gdt = get_cpu_gdt_table(cpu);
20640 + pack_descriptor(&d, address, length, 0x9b, flags);
20641 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20642 + pack_descriptor(&d, address, length, 0x93, flags);
20643 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20644 + }
20645 + return entry;
20646 + }
20647 + case 0x80: /* Not present */
20648 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20649 + return 0;
20650 + default: /* Shouldn't happen */
20651 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20652 + service, return_code);
20653 + return 0;
20654 }
20655 }
20656
20657 static struct {
20658 unsigned long address;
20659 unsigned short segment;
20660 -} pci_indirect = { 0, __KERNEL_CS };
20661 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20662
20663 -static int pci_bios_present;
20664 +static int pci_bios_present __read_only;
20665
20666 static int __devinit check_pcibios(void)
20667 {
20668 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20669 unsigned long flags, pcibios_entry;
20670
20671 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20672 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20673 + pci_indirect.address = pcibios_entry;
20674
20675 local_irq_save(flags);
20676 - __asm__(
20677 - "lcall *(%%edi); cld\n\t"
20678 + __asm__("movw %w6, %%ds\n\t"
20679 + "lcall *%%ss:(%%edi); cld\n\t"
20680 + "push %%ss\n\t"
20681 + "pop %%ds\n\t"
20682 "jc 1f\n\t"
20683 "xor %%ah, %%ah\n"
20684 "1:"
20685 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20686 "=b" (ebx),
20687 "=c" (ecx)
20688 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20689 - "D" (&pci_indirect)
20690 + "D" (&pci_indirect),
20691 + "r" (__PCIBIOS_DS)
20692 : "memory");
20693 local_irq_restore(flags);
20694
20695 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20696
20697 switch (len) {
20698 case 1:
20699 - __asm__("lcall *(%%esi); cld\n\t"
20700 + __asm__("movw %w6, %%ds\n\t"
20701 + "lcall *%%ss:(%%esi); cld\n\t"
20702 + "push %%ss\n\t"
20703 + "pop %%ds\n\t"
20704 "jc 1f\n\t"
20705 "xor %%ah, %%ah\n"
20706 "1:"
20707 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20708 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20709 "b" (bx),
20710 "D" ((long)reg),
20711 - "S" (&pci_indirect));
20712 + "S" (&pci_indirect),
20713 + "r" (__PCIBIOS_DS));
20714 /*
20715 * Zero-extend the result beyond 8 bits, do not trust the
20716 * BIOS having done it:
20717 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20718 *value &= 0xff;
20719 break;
20720 case 2:
20721 - __asm__("lcall *(%%esi); cld\n\t"
20722 + __asm__("movw %w6, %%ds\n\t"
20723 + "lcall *%%ss:(%%esi); cld\n\t"
20724 + "push %%ss\n\t"
20725 + "pop %%ds\n\t"
20726 "jc 1f\n\t"
20727 "xor %%ah, %%ah\n"
20728 "1:"
20729 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20730 : "1" (PCIBIOS_READ_CONFIG_WORD),
20731 "b" (bx),
20732 "D" ((long)reg),
20733 - "S" (&pci_indirect));
20734 + "S" (&pci_indirect),
20735 + "r" (__PCIBIOS_DS));
20736 /*
20737 * Zero-extend the result beyond 16 bits, do not trust the
20738 * BIOS having done it:
20739 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20740 *value &= 0xffff;
20741 break;
20742 case 4:
20743 - __asm__("lcall *(%%esi); cld\n\t"
20744 + __asm__("movw %w6, %%ds\n\t"
20745 + "lcall *%%ss:(%%esi); cld\n\t"
20746 + "push %%ss\n\t"
20747 + "pop %%ds\n\t"
20748 "jc 1f\n\t"
20749 "xor %%ah, %%ah\n"
20750 "1:"
20751 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20752 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20753 "b" (bx),
20754 "D" ((long)reg),
20755 - "S" (&pci_indirect));
20756 + "S" (&pci_indirect),
20757 + "r" (__PCIBIOS_DS));
20758 break;
20759 }
20760
20761 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20762
20763 switch (len) {
20764 case 1:
20765 - __asm__("lcall *(%%esi); cld\n\t"
20766 + __asm__("movw %w6, %%ds\n\t"
20767 + "lcall *%%ss:(%%esi); cld\n\t"
20768 + "push %%ss\n\t"
20769 + "pop %%ds\n\t"
20770 "jc 1f\n\t"
20771 "xor %%ah, %%ah\n"
20772 "1:"
20773 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20774 "c" (value),
20775 "b" (bx),
20776 "D" ((long)reg),
20777 - "S" (&pci_indirect));
20778 + "S" (&pci_indirect),
20779 + "r" (__PCIBIOS_DS));
20780 break;
20781 case 2:
20782 - __asm__("lcall *(%%esi); cld\n\t"
20783 + __asm__("movw %w6, %%ds\n\t"
20784 + "lcall *%%ss:(%%esi); cld\n\t"
20785 + "push %%ss\n\t"
20786 + "pop %%ds\n\t"
20787 "jc 1f\n\t"
20788 "xor %%ah, %%ah\n"
20789 "1:"
20790 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20791 "c" (value),
20792 "b" (bx),
20793 "D" ((long)reg),
20794 - "S" (&pci_indirect));
20795 + "S" (&pci_indirect),
20796 + "r" (__PCIBIOS_DS));
20797 break;
20798 case 4:
20799 - __asm__("lcall *(%%esi); cld\n\t"
20800 + __asm__("movw %w6, %%ds\n\t"
20801 + "lcall *%%ss:(%%esi); cld\n\t"
20802 + "push %%ss\n\t"
20803 + "pop %%ds\n\t"
20804 "jc 1f\n\t"
20805 "xor %%ah, %%ah\n"
20806 "1:"
20807 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20808 "c" (value),
20809 "b" (bx),
20810 "D" ((long)reg),
20811 - "S" (&pci_indirect));
20812 + "S" (&pci_indirect),
20813 + "r" (__PCIBIOS_DS));
20814 break;
20815 }
20816
20817 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20818
20819 DBG("PCI: Fetching IRQ routing table... ");
20820 __asm__("push %%es\n\t"
20821 + "movw %w8, %%ds\n\t"
20822 "push %%ds\n\t"
20823 "pop %%es\n\t"
20824 - "lcall *(%%esi); cld\n\t"
20825 + "lcall *%%ss:(%%esi); cld\n\t"
20826 "pop %%es\n\t"
20827 + "push %%ss\n\t"
20828 + "pop %%ds\n"
20829 "jc 1f\n\t"
20830 "xor %%ah, %%ah\n"
20831 "1:"
20832 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20833 "1" (0),
20834 "D" ((long) &opt),
20835 "S" (&pci_indirect),
20836 - "m" (opt)
20837 + "m" (opt),
20838 + "r" (__PCIBIOS_DS)
20839 : "memory");
20840 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20841 if (ret & 0xff00)
20842 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20843 {
20844 int ret;
20845
20846 - __asm__("lcall *(%%esi); cld\n\t"
20847 + __asm__("movw %w5, %%ds\n\t"
20848 + "lcall *%%ss:(%%esi); cld\n\t"
20849 + "push %%ss\n\t"
20850 + "pop %%ds\n"
20851 "jc 1f\n\t"
20852 "xor %%ah, %%ah\n"
20853 "1:"
20854 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20855 : "0" (PCIBIOS_SET_PCI_HW_INT),
20856 "b" ((dev->bus->number << 8) | dev->devfn),
20857 "c" ((irq << 8) | (pin + 10)),
20858 - "S" (&pci_indirect));
20859 + "S" (&pci_indirect),
20860 + "r" (__PCIBIOS_DS));
20861 return !(ret & 0xff00);
20862 }
20863 EXPORT_SYMBOL(pcibios_set_irq_routing);
20864 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_32.c linux-3.0.3/arch/x86/platform/efi/efi_32.c
20865 --- linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20866 +++ linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20867 @@ -38,70 +38,37 @@
20868 */
20869
20870 static unsigned long efi_rt_eflags;
20871 -static pgd_t efi_bak_pg_dir_pointer[2];
20872 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20873
20874 -void efi_call_phys_prelog(void)
20875 +void __init efi_call_phys_prelog(void)
20876 {
20877 - unsigned long cr4;
20878 - unsigned long temp;
20879 struct desc_ptr gdt_descr;
20880
20881 local_irq_save(efi_rt_eflags);
20882
20883 - /*
20884 - * If I don't have PAE, I should just duplicate two entries in page
20885 - * directory. If I have PAE, I just need to duplicate one entry in
20886 - * page directory.
20887 - */
20888 - cr4 = read_cr4_safe();
20889 -
20890 - if (cr4 & X86_CR4_PAE) {
20891 - efi_bak_pg_dir_pointer[0].pgd =
20892 - swapper_pg_dir[pgd_index(0)].pgd;
20893 - swapper_pg_dir[0].pgd =
20894 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20895 - } else {
20896 - efi_bak_pg_dir_pointer[0].pgd =
20897 - swapper_pg_dir[pgd_index(0)].pgd;
20898 - efi_bak_pg_dir_pointer[1].pgd =
20899 - swapper_pg_dir[pgd_index(0x400000)].pgd;
20900 - swapper_pg_dir[pgd_index(0)].pgd =
20901 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20902 - temp = PAGE_OFFSET + 0x400000;
20903 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20904 - swapper_pg_dir[pgd_index(temp)].pgd;
20905 - }
20906 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20907 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20908 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20909
20910 /*
20911 * After the lock is released, the original page table is restored.
20912 */
20913 __flush_tlb_all();
20914
20915 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
20916 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20917 gdt_descr.size = GDT_SIZE - 1;
20918 load_gdt(&gdt_descr);
20919 }
20920
20921 -void efi_call_phys_epilog(void)
20922 +void __init efi_call_phys_epilog(void)
20923 {
20924 - unsigned long cr4;
20925 struct desc_ptr gdt_descr;
20926
20927 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20928 + gdt_descr.address = get_cpu_gdt_table(0);
20929 gdt_descr.size = GDT_SIZE - 1;
20930 load_gdt(&gdt_descr);
20931
20932 - cr4 = read_cr4_safe();
20933 -
20934 - if (cr4 & X86_CR4_PAE) {
20935 - swapper_pg_dir[pgd_index(0)].pgd =
20936 - efi_bak_pg_dir_pointer[0].pgd;
20937 - } else {
20938 - swapper_pg_dir[pgd_index(0)].pgd =
20939 - efi_bak_pg_dir_pointer[0].pgd;
20940 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20941 - efi_bak_pg_dir_pointer[1].pgd;
20942 - }
20943 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20944
20945 /*
20946 * After the lock is released, the original page table is restored.
20947 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S
20948 --- linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20949 +++ linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20950 @@ -6,6 +6,7 @@
20951 */
20952
20953 #include <linux/linkage.h>
20954 +#include <linux/init.h>
20955 #include <asm/page_types.h>
20956
20957 /*
20958 @@ -20,7 +21,7 @@
20959 * service functions will comply with gcc calling convention, too.
20960 */
20961
20962 -.text
20963 +__INIT
20964 ENTRY(efi_call_phys)
20965 /*
20966 * 0. The function can only be called in Linux kernel. So CS has been
20967 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20968 * The mapping of lower virtual memory has been created in prelog and
20969 * epilog.
20970 */
20971 - movl $1f, %edx
20972 - subl $__PAGE_OFFSET, %edx
20973 - jmp *%edx
20974 + jmp 1f-__PAGE_OFFSET
20975 1:
20976
20977 /*
20978 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20979 * parameter 2, ..., param n. To make things easy, we save the return
20980 * address of efi_call_phys in a global variable.
20981 */
20982 - popl %edx
20983 - movl %edx, saved_return_addr
20984 - /* get the function pointer into ECX*/
20985 - popl %ecx
20986 - movl %ecx, efi_rt_function_ptr
20987 - movl $2f, %edx
20988 - subl $__PAGE_OFFSET, %edx
20989 - pushl %edx
20990 + popl (saved_return_addr)
20991 + popl (efi_rt_function_ptr)
20992
20993 /*
20994 * 3. Clear PG bit in %CR0.
20995 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
20996 /*
20997 * 5. Call the physical function.
20998 */
20999 - jmp *%ecx
21000 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21001
21002 -2:
21003 /*
21004 * 6. After EFI runtime service returns, control will return to
21005 * following instruction. We'd better readjust stack pointer first.
21006 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21007 movl %cr0, %edx
21008 orl $0x80000000, %edx
21009 movl %edx, %cr0
21010 - jmp 1f
21011 -1:
21012 +
21013 /*
21014 * 8. Now restore the virtual mode from flat mode by
21015 * adding EIP with PAGE_OFFSET.
21016 */
21017 - movl $1f, %edx
21018 - jmp *%edx
21019 + jmp 1f+__PAGE_OFFSET
21020 1:
21021
21022 /*
21023 * 9. Balance the stack. And because EAX contain the return value,
21024 * we'd better not clobber it.
21025 */
21026 - leal efi_rt_function_ptr, %edx
21027 - movl (%edx), %ecx
21028 - pushl %ecx
21029 + pushl (efi_rt_function_ptr)
21030
21031 /*
21032 - * 10. Push the saved return address onto the stack and return.
21033 + * 10. Return to the saved return address.
21034 */
21035 - leal saved_return_addr, %edx
21036 - movl (%edx), %ecx
21037 - pushl %ecx
21038 - ret
21039 + jmpl *(saved_return_addr)
21040 ENDPROC(efi_call_phys)
21041 .previous
21042
21043 -.data
21044 +__INITDATA
21045 saved_return_addr:
21046 .long 0
21047 efi_rt_function_ptr:
21048 diff -urNp linux-3.0.3/arch/x86/platform/mrst/mrst.c linux-3.0.3/arch/x86/platform/mrst/mrst.c
21049 --- linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21050 +++ linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21051 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21052 }
21053
21054 /* Reboot and power off are handled by the SCU on a MID device */
21055 -static void mrst_power_off(void)
21056 +static __noreturn void mrst_power_off(void)
21057 {
21058 intel_scu_ipc_simple_command(0xf1, 1);
21059 + BUG();
21060 }
21061
21062 -static void mrst_reboot(void)
21063 +static __noreturn void mrst_reboot(void)
21064 {
21065 intel_scu_ipc_simple_command(0xf1, 0);
21066 + BUG();
21067 }
21068
21069 /*
21070 diff -urNp linux-3.0.3/arch/x86/platform/uv/tlb_uv.c linux-3.0.3/arch/x86/platform/uv/tlb_uv.c
21071 --- linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21072 +++ linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21073 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21074 cpumask_t mask;
21075 struct reset_args reset_args;
21076
21077 + pax_track_stack();
21078 +
21079 reset_args.sender = sender;
21080 cpus_clear(mask);
21081 /* find a single cpu for each uvhub in this distribution mask */
21082 diff -urNp linux-3.0.3/arch/x86/power/cpu.c linux-3.0.3/arch/x86/power/cpu.c
21083 --- linux-3.0.3/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21084 +++ linux-3.0.3/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21085 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21086 static void fix_processor_context(void)
21087 {
21088 int cpu = smp_processor_id();
21089 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21090 + struct tss_struct *t = init_tss + cpu;
21091
21092 set_tss_desc(cpu, t); /*
21093 * This just modifies memory; should not be
21094 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21095 */
21096
21097 #ifdef CONFIG_X86_64
21098 + pax_open_kernel();
21099 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21100 + pax_close_kernel();
21101
21102 syscall_init(); /* This sets MSR_*STAR and related */
21103 #endif
21104 diff -urNp linux-3.0.3/arch/x86/vdso/Makefile linux-3.0.3/arch/x86/vdso/Makefile
21105 --- linux-3.0.3/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21106 +++ linux-3.0.3/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21107 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21108 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21109 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21110
21111 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21112 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21113 GCOV_PROFILE := n
21114
21115 #
21116 diff -urNp linux-3.0.3/arch/x86/vdso/vdso32-setup.c linux-3.0.3/arch/x86/vdso/vdso32-setup.c
21117 --- linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21118 +++ linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21119 @@ -25,6 +25,7 @@
21120 #include <asm/tlbflush.h>
21121 #include <asm/vdso.h>
21122 #include <asm/proto.h>
21123 +#include <asm/mman.h>
21124
21125 enum {
21126 VDSO_DISABLED = 0,
21127 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21128 void enable_sep_cpu(void)
21129 {
21130 int cpu = get_cpu();
21131 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21132 + struct tss_struct *tss = init_tss + cpu;
21133
21134 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21135 put_cpu();
21136 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21137 gate_vma.vm_start = FIXADDR_USER_START;
21138 gate_vma.vm_end = FIXADDR_USER_END;
21139 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21140 - gate_vma.vm_page_prot = __P101;
21141 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21142 /*
21143 * Make sure the vDSO gets into every core dump.
21144 * Dumping its contents makes post-mortem fully interpretable later
21145 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21146 if (compat)
21147 addr = VDSO_HIGH_BASE;
21148 else {
21149 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21150 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21151 if (IS_ERR_VALUE(addr)) {
21152 ret = addr;
21153 goto up_fail;
21154 }
21155 }
21156
21157 - current->mm->context.vdso = (void *)addr;
21158 + current->mm->context.vdso = addr;
21159
21160 if (compat_uses_vma || !compat) {
21161 /*
21162 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21163 }
21164
21165 current_thread_info()->sysenter_return =
21166 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21167 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21168
21169 up_fail:
21170 if (ret)
21171 - current->mm->context.vdso = NULL;
21172 + current->mm->context.vdso = 0;
21173
21174 up_write(&mm->mmap_sem);
21175
21176 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21177
21178 const char *arch_vma_name(struct vm_area_struct *vma)
21179 {
21180 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21181 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21182 return "[vdso]";
21183 +
21184 +#ifdef CONFIG_PAX_SEGMEXEC
21185 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21186 + return "[vdso]";
21187 +#endif
21188 +
21189 return NULL;
21190 }
21191
21192 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21193 * Check to see if the corresponding task was created in compat vdso
21194 * mode.
21195 */
21196 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21197 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21198 return &gate_vma;
21199 return NULL;
21200 }
21201 diff -urNp linux-3.0.3/arch/x86/vdso/vma.c linux-3.0.3/arch/x86/vdso/vma.c
21202 --- linux-3.0.3/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21203 +++ linux-3.0.3/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21204 @@ -15,18 +15,19 @@
21205 #include <asm/proto.h>
21206 #include <asm/vdso.h>
21207
21208 -unsigned int __read_mostly vdso_enabled = 1;
21209 -
21210 extern char vdso_start[], vdso_end[];
21211 extern unsigned short vdso_sync_cpuid;
21212 +extern char __vsyscall_0;
21213
21214 static struct page **vdso_pages;
21215 +static struct page *vsyscall_page;
21216 static unsigned vdso_size;
21217
21218 static int __init init_vdso_vars(void)
21219 {
21220 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21221 - int i;
21222 + size_t nbytes = vdso_end - vdso_start;
21223 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21224 + size_t i;
21225
21226 vdso_size = npages << PAGE_SHIFT;
21227 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21228 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21229 goto oom;
21230 for (i = 0; i < npages; i++) {
21231 struct page *p;
21232 - p = alloc_page(GFP_KERNEL);
21233 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21234 if (!p)
21235 goto oom;
21236 vdso_pages[i] = p;
21237 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21238 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21239 + nbytes -= PAGE_SIZE;
21240 }
21241 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21242
21243 return 0;
21244
21245 oom:
21246 - printk("Cannot allocate vdso\n");
21247 - vdso_enabled = 0;
21248 - return -ENOMEM;
21249 + panic("Cannot allocate vdso\n");
21250 }
21251 subsys_initcall(init_vdso_vars);
21252
21253 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21254 unsigned long addr;
21255 int ret;
21256
21257 - if (!vdso_enabled)
21258 - return 0;
21259 -
21260 down_write(&mm->mmap_sem);
21261 - addr = vdso_addr(mm->start_stack, vdso_size);
21262 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21263 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21264 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21265 if (IS_ERR_VALUE(addr)) {
21266 ret = addr;
21267 goto up_fail;
21268 }
21269
21270 - current->mm->context.vdso = (void *)addr;
21271 + mm->context.vdso = addr + PAGE_SIZE;
21272
21273 - ret = install_special_mapping(mm, addr, vdso_size,
21274 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21275 VM_READ|VM_EXEC|
21276 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21277 + VM_MAYREAD|VM_MAYEXEC|
21278 VM_ALWAYSDUMP,
21279 - vdso_pages);
21280 + &vsyscall_page);
21281 if (ret) {
21282 - current->mm->context.vdso = NULL;
21283 + mm->context.vdso = 0;
21284 goto up_fail;
21285 }
21286
21287 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21288 + VM_READ|VM_EXEC|
21289 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21290 + VM_ALWAYSDUMP,
21291 + vdso_pages);
21292 + if (ret)
21293 + mm->context.vdso = 0;
21294 +
21295 up_fail:
21296 up_write(&mm->mmap_sem);
21297 return ret;
21298 }
21299 -
21300 -static __init int vdso_setup(char *s)
21301 -{
21302 - vdso_enabled = simple_strtoul(s, NULL, 0);
21303 - return 0;
21304 -}
21305 -__setup("vdso=", vdso_setup);
21306 diff -urNp linux-3.0.3/arch/x86/xen/enlighten.c linux-3.0.3/arch/x86/xen/enlighten.c
21307 --- linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:44:40.000000000 -0400
21308 +++ linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:47:55.000000000 -0400
21309 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21310
21311 struct shared_info xen_dummy_shared_info;
21312
21313 -void *xen_initial_gdt;
21314 -
21315 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21316 __read_mostly int xen_have_vector_callback;
21317 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21318 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21319 #endif
21320 };
21321
21322 -static void xen_reboot(int reason)
21323 +static __noreturn void xen_reboot(int reason)
21324 {
21325 struct sched_shutdown r = { .reason = reason };
21326
21327 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21328 BUG();
21329 }
21330
21331 -static void xen_restart(char *msg)
21332 +static __noreturn void xen_restart(char *msg)
21333 {
21334 xen_reboot(SHUTDOWN_reboot);
21335 }
21336
21337 -static void xen_emergency_restart(void)
21338 +static __noreturn void xen_emergency_restart(void)
21339 {
21340 xen_reboot(SHUTDOWN_reboot);
21341 }
21342
21343 -static void xen_machine_halt(void)
21344 +static __noreturn void xen_machine_halt(void)
21345 {
21346 xen_reboot(SHUTDOWN_poweroff);
21347 }
21348 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21349 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21350
21351 /* Work out if we support NX */
21352 - x86_configure_nx();
21353 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21354 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21355 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21356 + unsigned l, h;
21357 +
21358 + __supported_pte_mask |= _PAGE_NX;
21359 + rdmsr(MSR_EFER, l, h);
21360 + l |= EFER_NX;
21361 + wrmsr(MSR_EFER, l, h);
21362 + }
21363 +#endif
21364
21365 xen_setup_features();
21366
21367 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21368
21369 machine_ops = xen_machine_ops;
21370
21371 - /*
21372 - * The only reliable way to retain the initial address of the
21373 - * percpu gdt_page is to remember it here, so we can go and
21374 - * mark it RW later, when the initial percpu area is freed.
21375 - */
21376 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21377 -
21378 xen_smp_init();
21379
21380 #ifdef CONFIG_ACPI_NUMA
21381 diff -urNp linux-3.0.3/arch/x86/xen/mmu.c linux-3.0.3/arch/x86/xen/mmu.c
21382 --- linux-3.0.3/arch/x86/xen/mmu.c 2011-07-21 22:17:23.000000000 -0400
21383 +++ linux-3.0.3/arch/x86/xen/mmu.c 2011-08-23 21:47:55.000000000 -0400
21384 @@ -1679,6 +1679,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21385 convert_pfn_mfn(init_level4_pgt);
21386 convert_pfn_mfn(level3_ident_pgt);
21387 convert_pfn_mfn(level3_kernel_pgt);
21388 + convert_pfn_mfn(level3_vmalloc_pgt);
21389 + convert_pfn_mfn(level3_vmemmap_pgt);
21390
21391 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21392 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21393 @@ -1697,7 +1699,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21394 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21395 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21396 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21397 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21398 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21399 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21400 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21401 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21402 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21403
21404 diff -urNp linux-3.0.3/arch/x86/xen/smp.c linux-3.0.3/arch/x86/xen/smp.c
21405 --- linux-3.0.3/arch/x86/xen/smp.c 2011-07-21 22:17:23.000000000 -0400
21406 +++ linux-3.0.3/arch/x86/xen/smp.c 2011-08-23 21:47:55.000000000 -0400
21407 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21408 {
21409 BUG_ON(smp_processor_id() != 0);
21410 native_smp_prepare_boot_cpu();
21411 -
21412 - /* We've switched to the "real" per-cpu gdt, so make sure the
21413 - old memory can be recycled */
21414 - make_lowmem_page_readwrite(xen_initial_gdt);
21415 -
21416 xen_filter_cpu_maps();
21417 xen_setup_vcpu_info_placement();
21418 }
21419 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21420 gdt = get_cpu_gdt_table(cpu);
21421
21422 ctxt->flags = VGCF_IN_KERNEL;
21423 - ctxt->user_regs.ds = __USER_DS;
21424 - ctxt->user_regs.es = __USER_DS;
21425 + ctxt->user_regs.ds = __KERNEL_DS;
21426 + ctxt->user_regs.es = __KERNEL_DS;
21427 ctxt->user_regs.ss = __KERNEL_DS;
21428 #ifdef CONFIG_X86_32
21429 ctxt->user_regs.fs = __KERNEL_PERCPU;
21430 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21431 + savesegment(gs, ctxt->user_regs.gs);
21432 #else
21433 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21434 #endif
21435 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21436 int rc;
21437
21438 per_cpu(current_task, cpu) = idle;
21439 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21440 #ifdef CONFIG_X86_32
21441 irq_ctx_init(cpu);
21442 #else
21443 clear_tsk_thread_flag(idle, TIF_FORK);
21444 - per_cpu(kernel_stack, cpu) =
21445 - (unsigned long)task_stack_page(idle) -
21446 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21447 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21448 #endif
21449 xen_setup_runstate_info(cpu);
21450 xen_setup_timer(cpu);
21451 diff -urNp linux-3.0.3/arch/x86/xen/xen-asm_32.S linux-3.0.3/arch/x86/xen/xen-asm_32.S
21452 --- linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21453 +++ linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21454 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21455 ESP_OFFSET=4 # bytes pushed onto stack
21456
21457 /*
21458 - * Store vcpu_info pointer for easy access. Do it this way to
21459 - * avoid having to reload %fs
21460 + * Store vcpu_info pointer for easy access.
21461 */
21462 #ifdef CONFIG_SMP
21463 - GET_THREAD_INFO(%eax)
21464 - movl TI_cpu(%eax), %eax
21465 - movl __per_cpu_offset(,%eax,4), %eax
21466 - mov xen_vcpu(%eax), %eax
21467 + push %fs
21468 + mov $(__KERNEL_PERCPU), %eax
21469 + mov %eax, %fs
21470 + mov PER_CPU_VAR(xen_vcpu), %eax
21471 + pop %fs
21472 #else
21473 movl xen_vcpu, %eax
21474 #endif
21475 diff -urNp linux-3.0.3/arch/x86/xen/xen-head.S linux-3.0.3/arch/x86/xen/xen-head.S
21476 --- linux-3.0.3/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21477 +++ linux-3.0.3/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21478 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21479 #ifdef CONFIG_X86_32
21480 mov %esi,xen_start_info
21481 mov $init_thread_union+THREAD_SIZE,%esp
21482 +#ifdef CONFIG_SMP
21483 + movl $cpu_gdt_table,%edi
21484 + movl $__per_cpu_load,%eax
21485 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21486 + rorl $16,%eax
21487 + movb %al,__KERNEL_PERCPU + 4(%edi)
21488 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21489 + movl $__per_cpu_end - 1,%eax
21490 + subl $__per_cpu_start,%eax
21491 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21492 +#endif
21493 #else
21494 mov %rsi,xen_start_info
21495 mov $init_thread_union+THREAD_SIZE,%rsp
21496 diff -urNp linux-3.0.3/arch/x86/xen/xen-ops.h linux-3.0.3/arch/x86/xen/xen-ops.h
21497 --- linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21498 +++ linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21499 @@ -10,8 +10,6 @@
21500 extern const char xen_hypervisor_callback[];
21501 extern const char xen_failsafe_callback[];
21502
21503 -extern void *xen_initial_gdt;
21504 -
21505 struct trap_info;
21506 void xen_copy_trap_info(struct trap_info *traps);
21507
21508 diff -urNp linux-3.0.3/block/blk-iopoll.c linux-3.0.3/block/blk-iopoll.c
21509 --- linux-3.0.3/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21510 +++ linux-3.0.3/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21511 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21512 }
21513 EXPORT_SYMBOL(blk_iopoll_complete);
21514
21515 -static void blk_iopoll_softirq(struct softirq_action *h)
21516 +static void blk_iopoll_softirq(void)
21517 {
21518 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21519 int rearm = 0, budget = blk_iopoll_budget;
21520 diff -urNp linux-3.0.3/block/blk-map.c linux-3.0.3/block/blk-map.c
21521 --- linux-3.0.3/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21522 +++ linux-3.0.3/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21523 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21524 if (!len || !kbuf)
21525 return -EINVAL;
21526
21527 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21528 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21529 if (do_copy)
21530 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21531 else
21532 diff -urNp linux-3.0.3/block/blk-softirq.c linux-3.0.3/block/blk-softirq.c
21533 --- linux-3.0.3/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21534 +++ linux-3.0.3/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21535 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21536 * Softirq action handler - move entries to local list and loop over them
21537 * while passing them to the queue registered handler.
21538 */
21539 -static void blk_done_softirq(struct softirq_action *h)
21540 +static void blk_done_softirq(void)
21541 {
21542 struct list_head *cpu_list, local_list;
21543
21544 diff -urNp linux-3.0.3/block/bsg.c linux-3.0.3/block/bsg.c
21545 --- linux-3.0.3/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21546 +++ linux-3.0.3/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21547 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21548 struct sg_io_v4 *hdr, struct bsg_device *bd,
21549 fmode_t has_write_perm)
21550 {
21551 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21552 + unsigned char *cmdptr;
21553 +
21554 if (hdr->request_len > BLK_MAX_CDB) {
21555 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21556 if (!rq->cmd)
21557 return -ENOMEM;
21558 - }
21559 + cmdptr = rq->cmd;
21560 + } else
21561 + cmdptr = tmpcmd;
21562
21563 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21564 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21565 hdr->request_len))
21566 return -EFAULT;
21567
21568 + if (cmdptr != rq->cmd)
21569 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21570 +
21571 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21572 if (blk_verify_command(rq->cmd, has_write_perm))
21573 return -EPERM;
21574 diff -urNp linux-3.0.3/block/scsi_ioctl.c linux-3.0.3/block/scsi_ioctl.c
21575 --- linux-3.0.3/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21576 +++ linux-3.0.3/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21577 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21578 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21579 struct sg_io_hdr *hdr, fmode_t mode)
21580 {
21581 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21582 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21583 + unsigned char *cmdptr;
21584 +
21585 + if (rq->cmd != rq->__cmd)
21586 + cmdptr = rq->cmd;
21587 + else
21588 + cmdptr = tmpcmd;
21589 +
21590 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21591 return -EFAULT;
21592 +
21593 + if (cmdptr != rq->cmd)
21594 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21595 +
21596 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21597 return -EPERM;
21598
21599 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21600 int err;
21601 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21602 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21603 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21604 + unsigned char *cmdptr;
21605
21606 if (!sic)
21607 return -EINVAL;
21608 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21609 */
21610 err = -EFAULT;
21611 rq->cmd_len = cmdlen;
21612 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21613 +
21614 + if (rq->cmd != rq->__cmd)
21615 + cmdptr = rq->cmd;
21616 + else
21617 + cmdptr = tmpcmd;
21618 +
21619 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21620 goto error;
21621
21622 + if (rq->cmd != cmdptr)
21623 + memcpy(rq->cmd, cmdptr, cmdlen);
21624 +
21625 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21626 goto error;
21627
21628 diff -urNp linux-3.0.3/crypto/cryptd.c linux-3.0.3/crypto/cryptd.c
21629 --- linux-3.0.3/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21630 +++ linux-3.0.3/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21631 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21632
21633 struct cryptd_blkcipher_request_ctx {
21634 crypto_completion_t complete;
21635 -};
21636 +} __no_const;
21637
21638 struct cryptd_hash_ctx {
21639 struct crypto_shash *child;
21640 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21641
21642 struct cryptd_aead_request_ctx {
21643 crypto_completion_t complete;
21644 -};
21645 +} __no_const;
21646
21647 static void cryptd_queue_worker(struct work_struct *work);
21648
21649 diff -urNp linux-3.0.3/crypto/gf128mul.c linux-3.0.3/crypto/gf128mul.c
21650 --- linux-3.0.3/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21651 +++ linux-3.0.3/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21652 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21653 for (i = 0; i < 7; ++i)
21654 gf128mul_x_lle(&p[i + 1], &p[i]);
21655
21656 - memset(r, 0, sizeof(r));
21657 + memset(r, 0, sizeof(*r));
21658 for (i = 0;;) {
21659 u8 ch = ((u8 *)b)[15 - i];
21660
21661 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21662 for (i = 0; i < 7; ++i)
21663 gf128mul_x_bbe(&p[i + 1], &p[i]);
21664
21665 - memset(r, 0, sizeof(r));
21666 + memset(r, 0, sizeof(*r));
21667 for (i = 0;;) {
21668 u8 ch = ((u8 *)b)[i];
21669
21670 diff -urNp linux-3.0.3/crypto/serpent.c linux-3.0.3/crypto/serpent.c
21671 --- linux-3.0.3/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21672 +++ linux-3.0.3/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21673 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21674 u32 r0,r1,r2,r3,r4;
21675 int i;
21676
21677 + pax_track_stack();
21678 +
21679 /* Copy key, add padding */
21680
21681 for (i = 0; i < keylen; ++i)
21682 diff -urNp linux-3.0.3/Documentation/dontdiff linux-3.0.3/Documentation/dontdiff
21683 --- linux-3.0.3/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21684 +++ linux-3.0.3/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21685 @@ -5,6 +5,7 @@
21686 *.cis
21687 *.cpio
21688 *.csp
21689 +*.dbg
21690 *.dsp
21691 *.dvi
21692 *.elf
21693 @@ -48,9 +49,11 @@
21694 *.tab.h
21695 *.tex
21696 *.ver
21697 +*.vim
21698 *.xml
21699 *.xz
21700 *_MODULES
21701 +*_reg_safe.h
21702 *_vga16.c
21703 *~
21704 \#*#
21705 @@ -70,6 +73,7 @@ Kerntypes
21706 Module.markers
21707 Module.symvers
21708 PENDING
21709 +PERF*
21710 SCCS
21711 System.map*
21712 TAGS
21713 @@ -98,6 +102,8 @@ bzImage*
21714 capability_names.h
21715 capflags.c
21716 classlist.h*
21717 +clut_vga16.c
21718 +common-cmds.h
21719 comp*.log
21720 compile.h*
21721 conf
21722 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21723 gconf
21724 gconf.glade.h
21725 gen-devlist
21726 +gen-kdb_cmds.c
21727 gen_crc32table
21728 gen_init_cpio
21729 generated
21730 genheaders
21731 genksyms
21732 *_gray256.c
21733 +hash
21734 hpet_example
21735 hugepage-mmap
21736 hugepage-shm
21737 @@ -146,7 +154,6 @@ int32.c
21738 int4.c
21739 int8.c
21740 kallsyms
21741 -kconfig
21742 keywords.c
21743 ksym.c*
21744 ksym.h*
21745 @@ -154,7 +161,6 @@ kxgettext
21746 lkc_defs.h
21747 lex.c
21748 lex.*.c
21749 -linux
21750 logo_*.c
21751 logo_*_clut224.c
21752 logo_*_mono.c
21753 @@ -174,6 +180,7 @@ mkboot
21754 mkbugboot
21755 mkcpustr
21756 mkdep
21757 +mkpiggy
21758 mkprep
21759 mkregtable
21760 mktables
21761 @@ -209,6 +216,7 @@ r300_reg_safe.h
21762 r420_reg_safe.h
21763 r600_reg_safe.h
21764 recordmcount
21765 +regdb.c
21766 relocs
21767 rlim_names.h
21768 rn50_reg_safe.h
21769 @@ -219,6 +227,7 @@ setup
21770 setup.bin
21771 setup.elf
21772 sImage
21773 +slabinfo
21774 sm_tbl*
21775 split-include
21776 syscalltab.h
21777 @@ -246,7 +255,9 @@ vmlinux
21778 vmlinux-*
21779 vmlinux.aout
21780 vmlinux.bin.all
21781 +vmlinux.bin.bz2
21782 vmlinux.lds
21783 +vmlinux.relocs
21784 vmlinuz
21785 voffset.h
21786 vsyscall.lds
21787 @@ -254,6 +265,7 @@ vsyscall_32.lds
21788 wanxlfw.inc
21789 uImage
21790 unifdef
21791 +utsrelease.h
21792 wakeup.bin
21793 wakeup.elf
21794 wakeup.lds
21795 diff -urNp linux-3.0.3/Documentation/kernel-parameters.txt linux-3.0.3/Documentation/kernel-parameters.txt
21796 --- linux-3.0.3/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21797 +++ linux-3.0.3/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21798 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21799 the specified number of seconds. This is to be used if
21800 your oopses keep scrolling off the screen.
21801
21802 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21803 + virtualization environments that don't cope well with the
21804 + expand down segment used by UDEREF on X86-32 or the frequent
21805 + page table updates on X86-64.
21806 +
21807 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21808 +
21809 pcbit= [HW,ISDN]
21810
21811 pcd. [PARIDE]
21812 diff -urNp linux-3.0.3/drivers/acpi/apei/cper.c linux-3.0.3/drivers/acpi/apei/cper.c
21813 --- linux-3.0.3/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21814 +++ linux-3.0.3/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21815 @@ -38,12 +38,12 @@
21816 */
21817 u64 cper_next_record_id(void)
21818 {
21819 - static atomic64_t seq;
21820 + static atomic64_unchecked_t seq;
21821
21822 - if (!atomic64_read(&seq))
21823 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
21824 + if (!atomic64_read_unchecked(&seq))
21825 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21826
21827 - return atomic64_inc_return(&seq);
21828 + return atomic64_inc_return_unchecked(&seq);
21829 }
21830 EXPORT_SYMBOL_GPL(cper_next_record_id);
21831
21832 diff -urNp linux-3.0.3/drivers/acpi/proc.c linux-3.0.3/drivers/acpi/proc.c
21833 --- linux-3.0.3/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21834 +++ linux-3.0.3/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21835 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21836 size_t count, loff_t * ppos)
21837 {
21838 struct list_head *node, *next;
21839 - char strbuf[5];
21840 - char str[5] = "";
21841 - unsigned int len = count;
21842 -
21843 - if (len > 4)
21844 - len = 4;
21845 - if (len < 0)
21846 - return -EFAULT;
21847 + char strbuf[5] = {0};
21848
21849 - if (copy_from_user(strbuf, buffer, len))
21850 + if (count > 4)
21851 + count = 4;
21852 + if (copy_from_user(strbuf, buffer, count))
21853 return -EFAULT;
21854 - strbuf[len] = '\0';
21855 - sscanf(strbuf, "%s", str);
21856 + strbuf[count] = '\0';
21857
21858 mutex_lock(&acpi_device_lock);
21859 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21860 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21861 if (!dev->wakeup.flags.valid)
21862 continue;
21863
21864 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
21865 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21866 if (device_can_wakeup(&dev->dev)) {
21867 bool enable = !device_may_wakeup(&dev->dev);
21868 device_set_wakeup_enable(&dev->dev, enable);
21869 diff -urNp linux-3.0.3/drivers/acpi/processor_driver.c linux-3.0.3/drivers/acpi/processor_driver.c
21870 --- linux-3.0.3/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21871 +++ linux-3.0.3/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21872 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21873 return 0;
21874 #endif
21875
21876 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21877 + BUG_ON(pr->id >= nr_cpu_ids);
21878
21879 /*
21880 * Buggy BIOS check
21881 diff -urNp linux-3.0.3/drivers/ata/libata-core.c linux-3.0.3/drivers/ata/libata-core.c
21882 --- linux-3.0.3/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21883 +++ linux-3.0.3/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21884 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21885 struct ata_port *ap;
21886 unsigned int tag;
21887
21888 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21889 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21890 ap = qc->ap;
21891
21892 qc->flags = 0;
21893 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21894 struct ata_port *ap;
21895 struct ata_link *link;
21896
21897 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21898 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21899 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21900 ap = qc->ap;
21901 link = qc->dev->link;
21902 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21903 return;
21904
21905 spin_lock(&lock);
21906 + pax_open_kernel();
21907
21908 for (cur = ops->inherits; cur; cur = cur->inherits) {
21909 void **inherit = (void **)cur;
21910 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
21911 if (IS_ERR(*pp))
21912 *pp = NULL;
21913
21914 - ops->inherits = NULL;
21915 + *(struct ata_port_operations **)&ops->inherits = NULL;
21916
21917 + pax_close_kernel();
21918 spin_unlock(&lock);
21919 }
21920
21921 diff -urNp linux-3.0.3/drivers/ata/libata-eh.c linux-3.0.3/drivers/ata/libata-eh.c
21922 --- linux-3.0.3/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
21923 +++ linux-3.0.3/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
21924 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
21925 {
21926 struct ata_link *link;
21927
21928 + pax_track_stack();
21929 +
21930 ata_for_each_link(link, ap, HOST_FIRST)
21931 ata_eh_link_report(link);
21932 }
21933 diff -urNp linux-3.0.3/drivers/ata/pata_arasan_cf.c linux-3.0.3/drivers/ata/pata_arasan_cf.c
21934 --- linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
21935 +++ linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
21936 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
21937 /* Handle platform specific quirks */
21938 if (pdata->quirk) {
21939 if (pdata->quirk & CF_BROKEN_PIO) {
21940 - ap->ops->set_piomode = NULL;
21941 + pax_open_kernel();
21942 + *(void **)&ap->ops->set_piomode = NULL;
21943 + pax_close_kernel();
21944 ap->pio_mask = 0;
21945 }
21946 if (pdata->quirk & CF_BROKEN_MWDMA)
21947 diff -urNp linux-3.0.3/drivers/atm/adummy.c linux-3.0.3/drivers/atm/adummy.c
21948 --- linux-3.0.3/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
21949 +++ linux-3.0.3/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
21950 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
21951 vcc->pop(vcc, skb);
21952 else
21953 dev_kfree_skb_any(skb);
21954 - atomic_inc(&vcc->stats->tx);
21955 + atomic_inc_unchecked(&vcc->stats->tx);
21956
21957 return 0;
21958 }
21959 diff -urNp linux-3.0.3/drivers/atm/ambassador.c linux-3.0.3/drivers/atm/ambassador.c
21960 --- linux-3.0.3/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
21961 +++ linux-3.0.3/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
21962 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
21963 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
21964
21965 // VC layer stats
21966 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
21967 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
21968
21969 // free the descriptor
21970 kfree (tx_descr);
21971 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
21972 dump_skb ("<<<", vc, skb);
21973
21974 // VC layer stats
21975 - atomic_inc(&atm_vcc->stats->rx);
21976 + atomic_inc_unchecked(&atm_vcc->stats->rx);
21977 __net_timestamp(skb);
21978 // end of our responsibility
21979 atm_vcc->push (atm_vcc, skb);
21980 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
21981 } else {
21982 PRINTK (KERN_INFO, "dropped over-size frame");
21983 // should we count this?
21984 - atomic_inc(&atm_vcc->stats->rx_drop);
21985 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
21986 }
21987
21988 } else {
21989 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
21990 }
21991
21992 if (check_area (skb->data, skb->len)) {
21993 - atomic_inc(&atm_vcc->stats->tx_err);
21994 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
21995 return -ENOMEM; // ?
21996 }
21997
21998 diff -urNp linux-3.0.3/drivers/atm/atmtcp.c linux-3.0.3/drivers/atm/atmtcp.c
21999 --- linux-3.0.3/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22000 +++ linux-3.0.3/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22001 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22002 if (vcc->pop) vcc->pop(vcc,skb);
22003 else dev_kfree_skb(skb);
22004 if (dev_data) return 0;
22005 - atomic_inc(&vcc->stats->tx_err);
22006 + atomic_inc_unchecked(&vcc->stats->tx_err);
22007 return -ENOLINK;
22008 }
22009 size = skb->len+sizeof(struct atmtcp_hdr);
22010 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22011 if (!new_skb) {
22012 if (vcc->pop) vcc->pop(vcc,skb);
22013 else dev_kfree_skb(skb);
22014 - atomic_inc(&vcc->stats->tx_err);
22015 + atomic_inc_unchecked(&vcc->stats->tx_err);
22016 return -ENOBUFS;
22017 }
22018 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22019 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22020 if (vcc->pop) vcc->pop(vcc,skb);
22021 else dev_kfree_skb(skb);
22022 out_vcc->push(out_vcc,new_skb);
22023 - atomic_inc(&vcc->stats->tx);
22024 - atomic_inc(&out_vcc->stats->rx);
22025 + atomic_inc_unchecked(&vcc->stats->tx);
22026 + atomic_inc_unchecked(&out_vcc->stats->rx);
22027 return 0;
22028 }
22029
22030 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22031 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22032 read_unlock(&vcc_sklist_lock);
22033 if (!out_vcc) {
22034 - atomic_inc(&vcc->stats->tx_err);
22035 + atomic_inc_unchecked(&vcc->stats->tx_err);
22036 goto done;
22037 }
22038 skb_pull(skb,sizeof(struct atmtcp_hdr));
22039 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22040 __net_timestamp(new_skb);
22041 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22042 out_vcc->push(out_vcc,new_skb);
22043 - atomic_inc(&vcc->stats->tx);
22044 - atomic_inc(&out_vcc->stats->rx);
22045 + atomic_inc_unchecked(&vcc->stats->tx);
22046 + atomic_inc_unchecked(&out_vcc->stats->rx);
22047 done:
22048 if (vcc->pop) vcc->pop(vcc,skb);
22049 else dev_kfree_skb(skb);
22050 diff -urNp linux-3.0.3/drivers/atm/eni.c linux-3.0.3/drivers/atm/eni.c
22051 --- linux-3.0.3/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22052 +++ linux-3.0.3/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22053 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22054 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22055 vcc->dev->number);
22056 length = 0;
22057 - atomic_inc(&vcc->stats->rx_err);
22058 + atomic_inc_unchecked(&vcc->stats->rx_err);
22059 }
22060 else {
22061 length = ATM_CELL_SIZE-1; /* no HEC */
22062 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22063 size);
22064 }
22065 eff = length = 0;
22066 - atomic_inc(&vcc->stats->rx_err);
22067 + atomic_inc_unchecked(&vcc->stats->rx_err);
22068 }
22069 else {
22070 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22071 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22072 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22073 vcc->dev->number,vcc->vci,length,size << 2,descr);
22074 length = eff = 0;
22075 - atomic_inc(&vcc->stats->rx_err);
22076 + atomic_inc_unchecked(&vcc->stats->rx_err);
22077 }
22078 }
22079 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22080 @@ -771,7 +771,7 @@ rx_dequeued++;
22081 vcc->push(vcc,skb);
22082 pushed++;
22083 }
22084 - atomic_inc(&vcc->stats->rx);
22085 + atomic_inc_unchecked(&vcc->stats->rx);
22086 }
22087 wake_up(&eni_dev->rx_wait);
22088 }
22089 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22090 PCI_DMA_TODEVICE);
22091 if (vcc->pop) vcc->pop(vcc,skb);
22092 else dev_kfree_skb_irq(skb);
22093 - atomic_inc(&vcc->stats->tx);
22094 + atomic_inc_unchecked(&vcc->stats->tx);
22095 wake_up(&eni_dev->tx_wait);
22096 dma_complete++;
22097 }
22098 diff -urNp linux-3.0.3/drivers/atm/firestream.c linux-3.0.3/drivers/atm/firestream.c
22099 --- linux-3.0.3/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22100 +++ linux-3.0.3/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22101 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22102 }
22103 }
22104
22105 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22106 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22107
22108 fs_dprintk (FS_DEBUG_TXMEM, "i");
22109 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22110 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22111 #endif
22112 skb_put (skb, qe->p1 & 0xffff);
22113 ATM_SKB(skb)->vcc = atm_vcc;
22114 - atomic_inc(&atm_vcc->stats->rx);
22115 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22116 __net_timestamp(skb);
22117 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22118 atm_vcc->push (atm_vcc, skb);
22119 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22120 kfree (pe);
22121 }
22122 if (atm_vcc)
22123 - atomic_inc(&atm_vcc->stats->rx_drop);
22124 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22125 break;
22126 case 0x1f: /* Reassembly abort: no buffers. */
22127 /* Silently increment error counter. */
22128 if (atm_vcc)
22129 - atomic_inc(&atm_vcc->stats->rx_drop);
22130 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22131 break;
22132 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22133 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22134 diff -urNp linux-3.0.3/drivers/atm/fore200e.c linux-3.0.3/drivers/atm/fore200e.c
22135 --- linux-3.0.3/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22136 +++ linux-3.0.3/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22137 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22138 #endif
22139 /* check error condition */
22140 if (*entry->status & STATUS_ERROR)
22141 - atomic_inc(&vcc->stats->tx_err);
22142 + atomic_inc_unchecked(&vcc->stats->tx_err);
22143 else
22144 - atomic_inc(&vcc->stats->tx);
22145 + atomic_inc_unchecked(&vcc->stats->tx);
22146 }
22147 }
22148
22149 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22150 if (skb == NULL) {
22151 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22152
22153 - atomic_inc(&vcc->stats->rx_drop);
22154 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22155 return -ENOMEM;
22156 }
22157
22158 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22159
22160 dev_kfree_skb_any(skb);
22161
22162 - atomic_inc(&vcc->stats->rx_drop);
22163 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22164 return -ENOMEM;
22165 }
22166
22167 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22168
22169 vcc->push(vcc, skb);
22170 - atomic_inc(&vcc->stats->rx);
22171 + atomic_inc_unchecked(&vcc->stats->rx);
22172
22173 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22174
22175 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22176 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22177 fore200e->atm_dev->number,
22178 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22179 - atomic_inc(&vcc->stats->rx_err);
22180 + atomic_inc_unchecked(&vcc->stats->rx_err);
22181 }
22182 }
22183
22184 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22185 goto retry_here;
22186 }
22187
22188 - atomic_inc(&vcc->stats->tx_err);
22189 + atomic_inc_unchecked(&vcc->stats->tx_err);
22190
22191 fore200e->tx_sat++;
22192 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22193 diff -urNp linux-3.0.3/drivers/atm/he.c linux-3.0.3/drivers/atm/he.c
22194 --- linux-3.0.3/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22195 +++ linux-3.0.3/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22196 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22197
22198 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22199 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22200 - atomic_inc(&vcc->stats->rx_drop);
22201 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22202 goto return_host_buffers;
22203 }
22204
22205 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22206 RBRQ_LEN_ERR(he_dev->rbrq_head)
22207 ? "LEN_ERR" : "",
22208 vcc->vpi, vcc->vci);
22209 - atomic_inc(&vcc->stats->rx_err);
22210 + atomic_inc_unchecked(&vcc->stats->rx_err);
22211 goto return_host_buffers;
22212 }
22213
22214 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22215 vcc->push(vcc, skb);
22216 spin_lock(&he_dev->global_lock);
22217
22218 - atomic_inc(&vcc->stats->rx);
22219 + atomic_inc_unchecked(&vcc->stats->rx);
22220
22221 return_host_buffers:
22222 ++pdus_assembled;
22223 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22224 tpd->vcc->pop(tpd->vcc, tpd->skb);
22225 else
22226 dev_kfree_skb_any(tpd->skb);
22227 - atomic_inc(&tpd->vcc->stats->tx_err);
22228 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22229 }
22230 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22231 return;
22232 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22233 vcc->pop(vcc, skb);
22234 else
22235 dev_kfree_skb_any(skb);
22236 - atomic_inc(&vcc->stats->tx_err);
22237 + atomic_inc_unchecked(&vcc->stats->tx_err);
22238 return -EINVAL;
22239 }
22240
22241 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22242 vcc->pop(vcc, skb);
22243 else
22244 dev_kfree_skb_any(skb);
22245 - atomic_inc(&vcc->stats->tx_err);
22246 + atomic_inc_unchecked(&vcc->stats->tx_err);
22247 return -EINVAL;
22248 }
22249 #endif
22250 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22251 vcc->pop(vcc, skb);
22252 else
22253 dev_kfree_skb_any(skb);
22254 - atomic_inc(&vcc->stats->tx_err);
22255 + atomic_inc_unchecked(&vcc->stats->tx_err);
22256 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22257 return -ENOMEM;
22258 }
22259 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22260 vcc->pop(vcc, skb);
22261 else
22262 dev_kfree_skb_any(skb);
22263 - atomic_inc(&vcc->stats->tx_err);
22264 + atomic_inc_unchecked(&vcc->stats->tx_err);
22265 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22266 return -ENOMEM;
22267 }
22268 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22269 __enqueue_tpd(he_dev, tpd, cid);
22270 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22271
22272 - atomic_inc(&vcc->stats->tx);
22273 + atomic_inc_unchecked(&vcc->stats->tx);
22274
22275 return 0;
22276 }
22277 diff -urNp linux-3.0.3/drivers/atm/horizon.c linux-3.0.3/drivers/atm/horizon.c
22278 --- linux-3.0.3/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22279 +++ linux-3.0.3/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22280 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22281 {
22282 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22283 // VC layer stats
22284 - atomic_inc(&vcc->stats->rx);
22285 + atomic_inc_unchecked(&vcc->stats->rx);
22286 __net_timestamp(skb);
22287 // end of our responsibility
22288 vcc->push (vcc, skb);
22289 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22290 dev->tx_iovec = NULL;
22291
22292 // VC layer stats
22293 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22294 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22295
22296 // free the skb
22297 hrz_kfree_skb (skb);
22298 diff -urNp linux-3.0.3/drivers/atm/idt77252.c linux-3.0.3/drivers/atm/idt77252.c
22299 --- linux-3.0.3/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22300 +++ linux-3.0.3/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22301 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22302 else
22303 dev_kfree_skb(skb);
22304
22305 - atomic_inc(&vcc->stats->tx);
22306 + atomic_inc_unchecked(&vcc->stats->tx);
22307 }
22308
22309 atomic_dec(&scq->used);
22310 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22311 if ((sb = dev_alloc_skb(64)) == NULL) {
22312 printk("%s: Can't allocate buffers for aal0.\n",
22313 card->name);
22314 - atomic_add(i, &vcc->stats->rx_drop);
22315 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22316 break;
22317 }
22318 if (!atm_charge(vcc, sb->truesize)) {
22319 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22320 card->name);
22321 - atomic_add(i - 1, &vcc->stats->rx_drop);
22322 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22323 dev_kfree_skb(sb);
22324 break;
22325 }
22326 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22327 ATM_SKB(sb)->vcc = vcc;
22328 __net_timestamp(sb);
22329 vcc->push(vcc, sb);
22330 - atomic_inc(&vcc->stats->rx);
22331 + atomic_inc_unchecked(&vcc->stats->rx);
22332
22333 cell += ATM_CELL_PAYLOAD;
22334 }
22335 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22336 "(CDC: %08x)\n",
22337 card->name, len, rpp->len, readl(SAR_REG_CDC));
22338 recycle_rx_pool_skb(card, rpp);
22339 - atomic_inc(&vcc->stats->rx_err);
22340 + atomic_inc_unchecked(&vcc->stats->rx_err);
22341 return;
22342 }
22343 if (stat & SAR_RSQE_CRC) {
22344 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22345 recycle_rx_pool_skb(card, rpp);
22346 - atomic_inc(&vcc->stats->rx_err);
22347 + atomic_inc_unchecked(&vcc->stats->rx_err);
22348 return;
22349 }
22350 if (skb_queue_len(&rpp->queue) > 1) {
22351 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22352 RXPRINTK("%s: Can't alloc RX skb.\n",
22353 card->name);
22354 recycle_rx_pool_skb(card, rpp);
22355 - atomic_inc(&vcc->stats->rx_err);
22356 + atomic_inc_unchecked(&vcc->stats->rx_err);
22357 return;
22358 }
22359 if (!atm_charge(vcc, skb->truesize)) {
22360 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22361 __net_timestamp(skb);
22362
22363 vcc->push(vcc, skb);
22364 - atomic_inc(&vcc->stats->rx);
22365 + atomic_inc_unchecked(&vcc->stats->rx);
22366
22367 return;
22368 }
22369 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22370 __net_timestamp(skb);
22371
22372 vcc->push(vcc, skb);
22373 - atomic_inc(&vcc->stats->rx);
22374 + atomic_inc_unchecked(&vcc->stats->rx);
22375
22376 if (skb->truesize > SAR_FB_SIZE_3)
22377 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22378 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22379 if (vcc->qos.aal != ATM_AAL0) {
22380 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22381 card->name, vpi, vci);
22382 - atomic_inc(&vcc->stats->rx_drop);
22383 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22384 goto drop;
22385 }
22386
22387 if ((sb = dev_alloc_skb(64)) == NULL) {
22388 printk("%s: Can't allocate buffers for AAL0.\n",
22389 card->name);
22390 - atomic_inc(&vcc->stats->rx_err);
22391 + atomic_inc_unchecked(&vcc->stats->rx_err);
22392 goto drop;
22393 }
22394
22395 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22396 ATM_SKB(sb)->vcc = vcc;
22397 __net_timestamp(sb);
22398 vcc->push(vcc, sb);
22399 - atomic_inc(&vcc->stats->rx);
22400 + atomic_inc_unchecked(&vcc->stats->rx);
22401
22402 drop:
22403 skb_pull(queue, 64);
22404 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22405
22406 if (vc == NULL) {
22407 printk("%s: NULL connection in send().\n", card->name);
22408 - atomic_inc(&vcc->stats->tx_err);
22409 + atomic_inc_unchecked(&vcc->stats->tx_err);
22410 dev_kfree_skb(skb);
22411 return -EINVAL;
22412 }
22413 if (!test_bit(VCF_TX, &vc->flags)) {
22414 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22415 - atomic_inc(&vcc->stats->tx_err);
22416 + atomic_inc_unchecked(&vcc->stats->tx_err);
22417 dev_kfree_skb(skb);
22418 return -EINVAL;
22419 }
22420 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22421 break;
22422 default:
22423 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22424 - atomic_inc(&vcc->stats->tx_err);
22425 + atomic_inc_unchecked(&vcc->stats->tx_err);
22426 dev_kfree_skb(skb);
22427 return -EINVAL;
22428 }
22429
22430 if (skb_shinfo(skb)->nr_frags != 0) {
22431 printk("%s: No scatter-gather yet.\n", card->name);
22432 - atomic_inc(&vcc->stats->tx_err);
22433 + atomic_inc_unchecked(&vcc->stats->tx_err);
22434 dev_kfree_skb(skb);
22435 return -EINVAL;
22436 }
22437 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22438
22439 err = queue_skb(card, vc, skb, oam);
22440 if (err) {
22441 - atomic_inc(&vcc->stats->tx_err);
22442 + atomic_inc_unchecked(&vcc->stats->tx_err);
22443 dev_kfree_skb(skb);
22444 return err;
22445 }
22446 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22447 skb = dev_alloc_skb(64);
22448 if (!skb) {
22449 printk("%s: Out of memory in send_oam().\n", card->name);
22450 - atomic_inc(&vcc->stats->tx_err);
22451 + atomic_inc_unchecked(&vcc->stats->tx_err);
22452 return -ENOMEM;
22453 }
22454 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22455 diff -urNp linux-3.0.3/drivers/atm/iphase.c linux-3.0.3/drivers/atm/iphase.c
22456 --- linux-3.0.3/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22457 +++ linux-3.0.3/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22458 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22459 status = (u_short) (buf_desc_ptr->desc_mode);
22460 if (status & (RX_CER | RX_PTE | RX_OFL))
22461 {
22462 - atomic_inc(&vcc->stats->rx_err);
22463 + atomic_inc_unchecked(&vcc->stats->rx_err);
22464 IF_ERR(printk("IA: bad packet, dropping it");)
22465 if (status & RX_CER) {
22466 IF_ERR(printk(" cause: packet CRC error\n");)
22467 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22468 len = dma_addr - buf_addr;
22469 if (len > iadev->rx_buf_sz) {
22470 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22471 - atomic_inc(&vcc->stats->rx_err);
22472 + atomic_inc_unchecked(&vcc->stats->rx_err);
22473 goto out_free_desc;
22474 }
22475
22476 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22477 ia_vcc = INPH_IA_VCC(vcc);
22478 if (ia_vcc == NULL)
22479 {
22480 - atomic_inc(&vcc->stats->rx_err);
22481 + atomic_inc_unchecked(&vcc->stats->rx_err);
22482 dev_kfree_skb_any(skb);
22483 atm_return(vcc, atm_guess_pdu2truesize(len));
22484 goto INCR_DLE;
22485 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22486 if ((length > iadev->rx_buf_sz) || (length >
22487 (skb->len - sizeof(struct cpcs_trailer))))
22488 {
22489 - atomic_inc(&vcc->stats->rx_err);
22490 + atomic_inc_unchecked(&vcc->stats->rx_err);
22491 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22492 length, skb->len);)
22493 dev_kfree_skb_any(skb);
22494 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22495
22496 IF_RX(printk("rx_dle_intr: skb push");)
22497 vcc->push(vcc,skb);
22498 - atomic_inc(&vcc->stats->rx);
22499 + atomic_inc_unchecked(&vcc->stats->rx);
22500 iadev->rx_pkt_cnt++;
22501 }
22502 INCR_DLE:
22503 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22504 {
22505 struct k_sonet_stats *stats;
22506 stats = &PRIV(_ia_dev[board])->sonet_stats;
22507 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22508 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22509 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22510 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22511 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22512 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22513 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22514 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22515 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22516 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22517 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22518 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22519 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22520 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22521 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22522 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22523 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22524 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22525 }
22526 ia_cmds.status = 0;
22527 break;
22528 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22529 if ((desc == 0) || (desc > iadev->num_tx_desc))
22530 {
22531 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22532 - atomic_inc(&vcc->stats->tx);
22533 + atomic_inc_unchecked(&vcc->stats->tx);
22534 if (vcc->pop)
22535 vcc->pop(vcc, skb);
22536 else
22537 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22538 ATM_DESC(skb) = vcc->vci;
22539 skb_queue_tail(&iadev->tx_dma_q, skb);
22540
22541 - atomic_inc(&vcc->stats->tx);
22542 + atomic_inc_unchecked(&vcc->stats->tx);
22543 iadev->tx_pkt_cnt++;
22544 /* Increment transaction counter */
22545 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22546
22547 #if 0
22548 /* add flow control logic */
22549 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22550 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22551 if (iavcc->vc_desc_cnt > 10) {
22552 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22553 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22554 diff -urNp linux-3.0.3/drivers/atm/lanai.c linux-3.0.3/drivers/atm/lanai.c
22555 --- linux-3.0.3/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22556 +++ linux-3.0.3/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22557 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22558 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22559 lanai_endtx(lanai, lvcc);
22560 lanai_free_skb(lvcc->tx.atmvcc, skb);
22561 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22562 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22563 }
22564
22565 /* Try to fill the buffer - don't call unless there is backlog */
22566 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22567 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22568 __net_timestamp(skb);
22569 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22570 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22571 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22572 out:
22573 lvcc->rx.buf.ptr = end;
22574 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22575 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22576 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22577 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22578 lanai->stats.service_rxnotaal5++;
22579 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22580 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22581 return 0;
22582 }
22583 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22584 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22585 int bytes;
22586 read_unlock(&vcc_sklist_lock);
22587 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22588 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22589 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22590 lvcc->stats.x.aal5.service_trash++;
22591 bytes = (SERVICE_GET_END(s) * 16) -
22592 (((unsigned long) lvcc->rx.buf.ptr) -
22593 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22594 }
22595 if (s & SERVICE_STREAM) {
22596 read_unlock(&vcc_sklist_lock);
22597 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22598 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22599 lvcc->stats.x.aal5.service_stream++;
22600 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22601 "PDU on VCI %d!\n", lanai->number, vci);
22602 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22603 return 0;
22604 }
22605 DPRINTK("got rx crc error on vci %d\n", vci);
22606 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22607 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22608 lvcc->stats.x.aal5.service_rxcrc++;
22609 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22610 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22611 diff -urNp linux-3.0.3/drivers/atm/nicstar.c linux-3.0.3/drivers/atm/nicstar.c
22612 --- linux-3.0.3/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22613 +++ linux-3.0.3/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22614 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22615 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22616 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22617 card->index);
22618 - atomic_inc(&vcc->stats->tx_err);
22619 + atomic_inc_unchecked(&vcc->stats->tx_err);
22620 dev_kfree_skb_any(skb);
22621 return -EINVAL;
22622 }
22623 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22624 if (!vc->tx) {
22625 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22626 card->index);
22627 - atomic_inc(&vcc->stats->tx_err);
22628 + atomic_inc_unchecked(&vcc->stats->tx_err);
22629 dev_kfree_skb_any(skb);
22630 return -EINVAL;
22631 }
22632 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22633 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22634 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22635 card->index);
22636 - atomic_inc(&vcc->stats->tx_err);
22637 + atomic_inc_unchecked(&vcc->stats->tx_err);
22638 dev_kfree_skb_any(skb);
22639 return -EINVAL;
22640 }
22641
22642 if (skb_shinfo(skb)->nr_frags != 0) {
22643 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22644 - atomic_inc(&vcc->stats->tx_err);
22645 + atomic_inc_unchecked(&vcc->stats->tx_err);
22646 dev_kfree_skb_any(skb);
22647 return -EINVAL;
22648 }
22649 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22650 }
22651
22652 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22653 - atomic_inc(&vcc->stats->tx_err);
22654 + atomic_inc_unchecked(&vcc->stats->tx_err);
22655 dev_kfree_skb_any(skb);
22656 return -EIO;
22657 }
22658 - atomic_inc(&vcc->stats->tx);
22659 + atomic_inc_unchecked(&vcc->stats->tx);
22660
22661 return 0;
22662 }
22663 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22664 printk
22665 ("nicstar%d: Can't allocate buffers for aal0.\n",
22666 card->index);
22667 - atomic_add(i, &vcc->stats->rx_drop);
22668 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22669 break;
22670 }
22671 if (!atm_charge(vcc, sb->truesize)) {
22672 RXPRINTK
22673 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22674 card->index);
22675 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22676 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22677 dev_kfree_skb_any(sb);
22678 break;
22679 }
22680 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22681 ATM_SKB(sb)->vcc = vcc;
22682 __net_timestamp(sb);
22683 vcc->push(vcc, sb);
22684 - atomic_inc(&vcc->stats->rx);
22685 + atomic_inc_unchecked(&vcc->stats->rx);
22686 cell += ATM_CELL_PAYLOAD;
22687 }
22688
22689 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22690 if (iovb == NULL) {
22691 printk("nicstar%d: Out of iovec buffers.\n",
22692 card->index);
22693 - atomic_inc(&vcc->stats->rx_drop);
22694 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22695 recycle_rx_buf(card, skb);
22696 return;
22697 }
22698 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22699 small or large buffer itself. */
22700 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22701 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22702 - atomic_inc(&vcc->stats->rx_err);
22703 + atomic_inc_unchecked(&vcc->stats->rx_err);
22704 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22705 NS_MAX_IOVECS);
22706 NS_PRV_IOVCNT(iovb) = 0;
22707 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22708 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22709 card->index);
22710 which_list(card, skb);
22711 - atomic_inc(&vcc->stats->rx_err);
22712 + atomic_inc_unchecked(&vcc->stats->rx_err);
22713 recycle_rx_buf(card, skb);
22714 vc->rx_iov = NULL;
22715 recycle_iov_buf(card, iovb);
22716 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22717 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22718 card->index);
22719 which_list(card, skb);
22720 - atomic_inc(&vcc->stats->rx_err);
22721 + atomic_inc_unchecked(&vcc->stats->rx_err);
22722 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22723 NS_PRV_IOVCNT(iovb));
22724 vc->rx_iov = NULL;
22725 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22726 printk(" - PDU size mismatch.\n");
22727 else
22728 printk(".\n");
22729 - atomic_inc(&vcc->stats->rx_err);
22730 + atomic_inc_unchecked(&vcc->stats->rx_err);
22731 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22732 NS_PRV_IOVCNT(iovb));
22733 vc->rx_iov = NULL;
22734 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22735 /* skb points to a small buffer */
22736 if (!atm_charge(vcc, skb->truesize)) {
22737 push_rxbufs(card, skb);
22738 - atomic_inc(&vcc->stats->rx_drop);
22739 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22740 } else {
22741 skb_put(skb, len);
22742 dequeue_sm_buf(card, skb);
22743 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22744 ATM_SKB(skb)->vcc = vcc;
22745 __net_timestamp(skb);
22746 vcc->push(vcc, skb);
22747 - atomic_inc(&vcc->stats->rx);
22748 + atomic_inc_unchecked(&vcc->stats->rx);
22749 }
22750 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22751 struct sk_buff *sb;
22752 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22753 if (len <= NS_SMBUFSIZE) {
22754 if (!atm_charge(vcc, sb->truesize)) {
22755 push_rxbufs(card, sb);
22756 - atomic_inc(&vcc->stats->rx_drop);
22757 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22758 } else {
22759 skb_put(sb, len);
22760 dequeue_sm_buf(card, sb);
22761 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22762 ATM_SKB(sb)->vcc = vcc;
22763 __net_timestamp(sb);
22764 vcc->push(vcc, sb);
22765 - atomic_inc(&vcc->stats->rx);
22766 + atomic_inc_unchecked(&vcc->stats->rx);
22767 }
22768
22769 push_rxbufs(card, skb);
22770 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22771
22772 if (!atm_charge(vcc, skb->truesize)) {
22773 push_rxbufs(card, skb);
22774 - atomic_inc(&vcc->stats->rx_drop);
22775 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22776 } else {
22777 dequeue_lg_buf(card, skb);
22778 #ifdef NS_USE_DESTRUCTORS
22779 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22780 ATM_SKB(skb)->vcc = vcc;
22781 __net_timestamp(skb);
22782 vcc->push(vcc, skb);
22783 - atomic_inc(&vcc->stats->rx);
22784 + atomic_inc_unchecked(&vcc->stats->rx);
22785 }
22786
22787 push_rxbufs(card, sb);
22788 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22789 printk
22790 ("nicstar%d: Out of huge buffers.\n",
22791 card->index);
22792 - atomic_inc(&vcc->stats->rx_drop);
22793 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22794 recycle_iovec_rx_bufs(card,
22795 (struct iovec *)
22796 iovb->data,
22797 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22798 card->hbpool.count++;
22799 } else
22800 dev_kfree_skb_any(hb);
22801 - atomic_inc(&vcc->stats->rx_drop);
22802 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22803 } else {
22804 /* Copy the small buffer to the huge buffer */
22805 sb = (struct sk_buff *)iov->iov_base;
22806 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22807 #endif /* NS_USE_DESTRUCTORS */
22808 __net_timestamp(hb);
22809 vcc->push(vcc, hb);
22810 - atomic_inc(&vcc->stats->rx);
22811 + atomic_inc_unchecked(&vcc->stats->rx);
22812 }
22813 }
22814
22815 diff -urNp linux-3.0.3/drivers/atm/solos-pci.c linux-3.0.3/drivers/atm/solos-pci.c
22816 --- linux-3.0.3/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22817 +++ linux-3.0.3/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22818 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22819 }
22820 atm_charge(vcc, skb->truesize);
22821 vcc->push(vcc, skb);
22822 - atomic_inc(&vcc->stats->rx);
22823 + atomic_inc_unchecked(&vcc->stats->rx);
22824 break;
22825
22826 case PKT_STATUS:
22827 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22828 char msg[500];
22829 char item[10];
22830
22831 + pax_track_stack();
22832 +
22833 len = buf->len;
22834 for (i = 0; i < len; i++){
22835 if(i % 8 == 0)
22836 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22837 vcc = SKB_CB(oldskb)->vcc;
22838
22839 if (vcc) {
22840 - atomic_inc(&vcc->stats->tx);
22841 + atomic_inc_unchecked(&vcc->stats->tx);
22842 solos_pop(vcc, oldskb);
22843 } else
22844 dev_kfree_skb_irq(oldskb);
22845 diff -urNp linux-3.0.3/drivers/atm/suni.c linux-3.0.3/drivers/atm/suni.c
22846 --- linux-3.0.3/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22847 +++ linux-3.0.3/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22848 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22849
22850
22851 #define ADD_LIMITED(s,v) \
22852 - atomic_add((v),&stats->s); \
22853 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22854 + atomic_add_unchecked((v),&stats->s); \
22855 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22856
22857
22858 static void suni_hz(unsigned long from_timer)
22859 diff -urNp linux-3.0.3/drivers/atm/uPD98402.c linux-3.0.3/drivers/atm/uPD98402.c
22860 --- linux-3.0.3/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22861 +++ linux-3.0.3/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22862 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22863 struct sonet_stats tmp;
22864 int error = 0;
22865
22866 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22867 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22868 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22869 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22870 if (zero && !error) {
22871 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22872
22873
22874 #define ADD_LIMITED(s,v) \
22875 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22876 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22877 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22878 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22879 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22880 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22881
22882
22883 static void stat_event(struct atm_dev *dev)
22884 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22885 if (reason & uPD98402_INT_PFM) stat_event(dev);
22886 if (reason & uPD98402_INT_PCO) {
22887 (void) GET(PCOCR); /* clear interrupt cause */
22888 - atomic_add(GET(HECCT),
22889 + atomic_add_unchecked(GET(HECCT),
22890 &PRIV(dev)->sonet_stats.uncorr_hcs);
22891 }
22892 if ((reason & uPD98402_INT_RFO) &&
22893 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22894 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22895 uPD98402_INT_LOS),PIMR); /* enable them */
22896 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22897 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22898 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22899 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22900 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22901 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22902 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22903 return 0;
22904 }
22905
22906 diff -urNp linux-3.0.3/drivers/atm/zatm.c linux-3.0.3/drivers/atm/zatm.c
22907 --- linux-3.0.3/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
22908 +++ linux-3.0.3/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
22909 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22910 }
22911 if (!size) {
22912 dev_kfree_skb_irq(skb);
22913 - if (vcc) atomic_inc(&vcc->stats->rx_err);
22914 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
22915 continue;
22916 }
22917 if (!atm_charge(vcc,skb->truesize)) {
22918 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22919 skb->len = size;
22920 ATM_SKB(skb)->vcc = vcc;
22921 vcc->push(vcc,skb);
22922 - atomic_inc(&vcc->stats->rx);
22923 + atomic_inc_unchecked(&vcc->stats->rx);
22924 }
22925 zout(pos & 0xffff,MTA(mbx));
22926 #if 0 /* probably a stupid idea */
22927 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
22928 skb_queue_head(&zatm_vcc->backlog,skb);
22929 break;
22930 }
22931 - atomic_inc(&vcc->stats->tx);
22932 + atomic_inc_unchecked(&vcc->stats->tx);
22933 wake_up(&zatm_vcc->tx_wait);
22934 }
22935
22936 diff -urNp linux-3.0.3/drivers/base/power/wakeup.c linux-3.0.3/drivers/base/power/wakeup.c
22937 --- linux-3.0.3/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
22938 +++ linux-3.0.3/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
22939 @@ -29,14 +29,14 @@ bool events_check_enabled;
22940 * They need to be modified together atomically, so it's better to use one
22941 * atomic variable to hold them both.
22942 */
22943 -static atomic_t combined_event_count = ATOMIC_INIT(0);
22944 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
22945
22946 #define IN_PROGRESS_BITS (sizeof(int) * 4)
22947 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
22948
22949 static void split_counters(unsigned int *cnt, unsigned int *inpr)
22950 {
22951 - unsigned int comb = atomic_read(&combined_event_count);
22952 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
22953
22954 *cnt = (comb >> IN_PROGRESS_BITS);
22955 *inpr = comb & MAX_IN_PROGRESS;
22956 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
22957 ws->last_time = ktime_get();
22958
22959 /* Increment the counter of events in progress. */
22960 - atomic_inc(&combined_event_count);
22961 + atomic_inc_unchecked(&combined_event_count);
22962 }
22963
22964 /**
22965 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
22966 * Increment the counter of registered wakeup events and decrement the
22967 * couter of wakeup events in progress simultaneously.
22968 */
22969 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
22970 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
22971 }
22972
22973 /**
22974 diff -urNp linux-3.0.3/drivers/block/cciss.c linux-3.0.3/drivers/block/cciss.c
22975 --- linux-3.0.3/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
22976 +++ linux-3.0.3/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
22977 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
22978 int err;
22979 u32 cp;
22980
22981 + memset(&arg64, 0, sizeof(arg64));
22982 +
22983 err = 0;
22984 err |=
22985 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
22986 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
22987 while (!list_empty(&h->reqQ)) {
22988 c = list_entry(h->reqQ.next, CommandList_struct, list);
22989 /* can't do anything if fifo is full */
22990 - if ((h->access.fifo_full(h))) {
22991 + if ((h->access->fifo_full(h))) {
22992 dev_warn(&h->pdev->dev, "fifo full\n");
22993 break;
22994 }
22995 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
22996 h->Qdepth--;
22997
22998 /* Tell the controller execute command */
22999 - h->access.submit_command(h, c);
23000 + h->access->submit_command(h, c);
23001
23002 /* Put job onto the completed Q */
23003 addQ(&h->cmpQ, c);
23004 @@ -3422,17 +3424,17 @@ startio:
23005
23006 static inline unsigned long get_next_completion(ctlr_info_t *h)
23007 {
23008 - return h->access.command_completed(h);
23009 + return h->access->command_completed(h);
23010 }
23011
23012 static inline int interrupt_pending(ctlr_info_t *h)
23013 {
23014 - return h->access.intr_pending(h);
23015 + return h->access->intr_pending(h);
23016 }
23017
23018 static inline long interrupt_not_for_us(ctlr_info_t *h)
23019 {
23020 - return ((h->access.intr_pending(h) == 0) ||
23021 + return ((h->access->intr_pending(h) == 0) ||
23022 (h->interrupts_enabled == 0));
23023 }
23024
23025 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23026 u32 a;
23027
23028 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23029 - return h->access.command_completed(h);
23030 + return h->access->command_completed(h);
23031
23032 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23033 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23034 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23035 trans_support & CFGTBL_Trans_use_short_tags);
23036
23037 /* Change the access methods to the performant access methods */
23038 - h->access = SA5_performant_access;
23039 + h->access = &SA5_performant_access;
23040 h->transMethod = CFGTBL_Trans_Performant;
23041
23042 return;
23043 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23044 if (prod_index < 0)
23045 return -ENODEV;
23046 h->product_name = products[prod_index].product_name;
23047 - h->access = *(products[prod_index].access);
23048 + h->access = products[prod_index].access;
23049
23050 if (cciss_board_disabled(h)) {
23051 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23052 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23053 }
23054
23055 /* make sure the board interrupts are off */
23056 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23057 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23058 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23059 if (rc)
23060 goto clean2;
23061 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23062 * fake ones to scoop up any residual completions.
23063 */
23064 spin_lock_irqsave(&h->lock, flags);
23065 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23066 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23067 spin_unlock_irqrestore(&h->lock, flags);
23068 free_irq(h->intr[PERF_MODE_INT], h);
23069 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23070 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23071 dev_info(&h->pdev->dev, "Board READY.\n");
23072 dev_info(&h->pdev->dev,
23073 "Waiting for stale completions to drain.\n");
23074 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23075 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23076 msleep(10000);
23077 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23078 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23079
23080 rc = controller_reset_failed(h->cfgtable);
23081 if (rc)
23082 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23083 cciss_scsi_setup(h);
23084
23085 /* Turn the interrupts on so we can service requests */
23086 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23087 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23088
23089 /* Get the firmware version */
23090 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23091 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23092 kfree(flush_buf);
23093 if (return_code != IO_OK)
23094 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23095 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23096 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23097 free_irq(h->intr[PERF_MODE_INT], h);
23098 }
23099
23100 diff -urNp linux-3.0.3/drivers/block/cciss.h linux-3.0.3/drivers/block/cciss.h
23101 --- linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23102 +++ linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23103 @@ -100,7 +100,7 @@ struct ctlr_info
23104 /* information about each logical volume */
23105 drive_info_struct *drv[CISS_MAX_LUN];
23106
23107 - struct access_method access;
23108 + struct access_method *access;
23109
23110 /* queue and queue Info */
23111 struct list_head reqQ;
23112 diff -urNp linux-3.0.3/drivers/block/cpqarray.c linux-3.0.3/drivers/block/cpqarray.c
23113 --- linux-3.0.3/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23114 +++ linux-3.0.3/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23115 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23116 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23117 goto Enomem4;
23118 }
23119 - hba[i]->access.set_intr_mask(hba[i], 0);
23120 + hba[i]->access->set_intr_mask(hba[i], 0);
23121 if (request_irq(hba[i]->intr, do_ida_intr,
23122 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23123 {
23124 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23125 add_timer(&hba[i]->timer);
23126
23127 /* Enable IRQ now that spinlock and rate limit timer are set up */
23128 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23129 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23130
23131 for(j=0; j<NWD; j++) {
23132 struct gendisk *disk = ida_gendisk[i][j];
23133 @@ -694,7 +694,7 @@ DBGINFO(
23134 for(i=0; i<NR_PRODUCTS; i++) {
23135 if (board_id == products[i].board_id) {
23136 c->product_name = products[i].product_name;
23137 - c->access = *(products[i].access);
23138 + c->access = products[i].access;
23139 break;
23140 }
23141 }
23142 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23143 hba[ctlr]->intr = intr;
23144 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23145 hba[ctlr]->product_name = products[j].product_name;
23146 - hba[ctlr]->access = *(products[j].access);
23147 + hba[ctlr]->access = products[j].access;
23148 hba[ctlr]->ctlr = ctlr;
23149 hba[ctlr]->board_id = board_id;
23150 hba[ctlr]->pci_dev = NULL; /* not PCI */
23151 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23152 struct scatterlist tmp_sg[SG_MAX];
23153 int i, dir, seg;
23154
23155 + pax_track_stack();
23156 +
23157 queue_next:
23158 creq = blk_peek_request(q);
23159 if (!creq)
23160 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23161
23162 while((c = h->reqQ) != NULL) {
23163 /* Can't do anything if we're busy */
23164 - if (h->access.fifo_full(h) == 0)
23165 + if (h->access->fifo_full(h) == 0)
23166 return;
23167
23168 /* Get the first entry from the request Q */
23169 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23170 h->Qdepth--;
23171
23172 /* Tell the controller to do our bidding */
23173 - h->access.submit_command(h, c);
23174 + h->access->submit_command(h, c);
23175
23176 /* Get onto the completion Q */
23177 addQ(&h->cmpQ, c);
23178 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23179 unsigned long flags;
23180 __u32 a,a1;
23181
23182 - istat = h->access.intr_pending(h);
23183 + istat = h->access->intr_pending(h);
23184 /* Is this interrupt for us? */
23185 if (istat == 0)
23186 return IRQ_NONE;
23187 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23188 */
23189 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23190 if (istat & FIFO_NOT_EMPTY) {
23191 - while((a = h->access.command_completed(h))) {
23192 + while((a = h->access->command_completed(h))) {
23193 a1 = a; a &= ~3;
23194 if ((c = h->cmpQ) == NULL)
23195 {
23196 @@ -1449,11 +1451,11 @@ static int sendcmd(
23197 /*
23198 * Disable interrupt
23199 */
23200 - info_p->access.set_intr_mask(info_p, 0);
23201 + info_p->access->set_intr_mask(info_p, 0);
23202 /* Make sure there is room in the command FIFO */
23203 /* Actually it should be completely empty at this time. */
23204 for (i = 200000; i > 0; i--) {
23205 - temp = info_p->access.fifo_full(info_p);
23206 + temp = info_p->access->fifo_full(info_p);
23207 if (temp != 0) {
23208 break;
23209 }
23210 @@ -1466,7 +1468,7 @@ DBG(
23211 /*
23212 * Send the cmd
23213 */
23214 - info_p->access.submit_command(info_p, c);
23215 + info_p->access->submit_command(info_p, c);
23216 complete = pollcomplete(ctlr);
23217
23218 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23219 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23220 * we check the new geometry. Then turn interrupts back on when
23221 * we're done.
23222 */
23223 - host->access.set_intr_mask(host, 0);
23224 + host->access->set_intr_mask(host, 0);
23225 getgeometry(ctlr);
23226 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23227 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23228
23229 for(i=0; i<NWD; i++) {
23230 struct gendisk *disk = ida_gendisk[ctlr][i];
23231 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23232 /* Wait (up to 2 seconds) for a command to complete */
23233
23234 for (i = 200000; i > 0; i--) {
23235 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23236 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23237 if (done == 0) {
23238 udelay(10); /* a short fixed delay */
23239 } else
23240 diff -urNp linux-3.0.3/drivers/block/cpqarray.h linux-3.0.3/drivers/block/cpqarray.h
23241 --- linux-3.0.3/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23242 +++ linux-3.0.3/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23243 @@ -99,7 +99,7 @@ struct ctlr_info {
23244 drv_info_t drv[NWD];
23245 struct proc_dir_entry *proc;
23246
23247 - struct access_method access;
23248 + struct access_method *access;
23249
23250 cmdlist_t *reqQ;
23251 cmdlist_t *cmpQ;
23252 diff -urNp linux-3.0.3/drivers/block/DAC960.c linux-3.0.3/drivers/block/DAC960.c
23253 --- linux-3.0.3/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23254 +++ linux-3.0.3/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23255 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23256 unsigned long flags;
23257 int Channel, TargetID;
23258
23259 + pax_track_stack();
23260 +
23261 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23262 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23263 sizeof(DAC960_SCSI_Inquiry_T) +
23264 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_int.h linux-3.0.3/drivers/block/drbd/drbd_int.h
23265 --- linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23266 +++ linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23267 @@ -737,7 +737,7 @@ struct drbd_request;
23268 struct drbd_epoch {
23269 struct list_head list;
23270 unsigned int barrier_nr;
23271 - atomic_t epoch_size; /* increased on every request added. */
23272 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23273 atomic_t active; /* increased on every req. added, and dec on every finished. */
23274 unsigned long flags;
23275 };
23276 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23277 void *int_dig_in;
23278 void *int_dig_vv;
23279 wait_queue_head_t seq_wait;
23280 - atomic_t packet_seq;
23281 + atomic_unchecked_t packet_seq;
23282 unsigned int peer_seq;
23283 spinlock_t peer_seq_lock;
23284 unsigned int minor;
23285 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_main.c linux-3.0.3/drivers/block/drbd/drbd_main.c
23286 --- linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23287 +++ linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23288 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23289 p.sector = sector;
23290 p.block_id = block_id;
23291 p.blksize = blksize;
23292 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23293 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23294
23295 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23296 return false;
23297 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23298 p.sector = cpu_to_be64(req->sector);
23299 p.block_id = (unsigned long)req;
23300 p.seq_num = cpu_to_be32(req->seq_num =
23301 - atomic_add_return(1, &mdev->packet_seq));
23302 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23303
23304 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23305
23306 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23307 atomic_set(&mdev->unacked_cnt, 0);
23308 atomic_set(&mdev->local_cnt, 0);
23309 atomic_set(&mdev->net_cnt, 0);
23310 - atomic_set(&mdev->packet_seq, 0);
23311 + atomic_set_unchecked(&mdev->packet_seq, 0);
23312 atomic_set(&mdev->pp_in_use, 0);
23313 atomic_set(&mdev->pp_in_use_by_net, 0);
23314 atomic_set(&mdev->rs_sect_in, 0);
23315 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23316 mdev->receiver.t_state);
23317
23318 /* no need to lock it, I'm the only thread alive */
23319 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23320 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23321 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23322 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23323 mdev->al_writ_cnt =
23324 mdev->bm_writ_cnt =
23325 mdev->read_cnt =
23326 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_nl.c linux-3.0.3/drivers/block/drbd/drbd_nl.c
23327 --- linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23328 +++ linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23329 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23330 module_put(THIS_MODULE);
23331 }
23332
23333 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23334 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23335
23336 static unsigned short *
23337 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23338 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23339 cn_reply->id.idx = CN_IDX_DRBD;
23340 cn_reply->id.val = CN_VAL_DRBD;
23341
23342 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23343 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23344 cn_reply->ack = 0; /* not used here. */
23345 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23346 (int)((char *)tl - (char *)reply->tag_list);
23347 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23348 cn_reply->id.idx = CN_IDX_DRBD;
23349 cn_reply->id.val = CN_VAL_DRBD;
23350
23351 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23352 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23353 cn_reply->ack = 0; /* not used here. */
23354 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23355 (int)((char *)tl - (char *)reply->tag_list);
23356 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23357 cn_reply->id.idx = CN_IDX_DRBD;
23358 cn_reply->id.val = CN_VAL_DRBD;
23359
23360 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23361 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23362 cn_reply->ack = 0; // not used here.
23363 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23364 (int)((char*)tl - (char*)reply->tag_list);
23365 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23366 cn_reply->id.idx = CN_IDX_DRBD;
23367 cn_reply->id.val = CN_VAL_DRBD;
23368
23369 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23370 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23371 cn_reply->ack = 0; /* not used here. */
23372 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23373 (int)((char *)tl - (char *)reply->tag_list);
23374 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_receiver.c linux-3.0.3/drivers/block/drbd/drbd_receiver.c
23375 --- linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23376 +++ linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23377 @@ -894,7 +894,7 @@ retry:
23378 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23379 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23380
23381 - atomic_set(&mdev->packet_seq, 0);
23382 + atomic_set_unchecked(&mdev->packet_seq, 0);
23383 mdev->peer_seq = 0;
23384
23385 drbd_thread_start(&mdev->asender);
23386 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23387 do {
23388 next_epoch = NULL;
23389
23390 - epoch_size = atomic_read(&epoch->epoch_size);
23391 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23392
23393 switch (ev & ~EV_CLEANUP) {
23394 case EV_PUT:
23395 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23396 rv = FE_DESTROYED;
23397 } else {
23398 epoch->flags = 0;
23399 - atomic_set(&epoch->epoch_size, 0);
23400 + atomic_set_unchecked(&epoch->epoch_size, 0);
23401 /* atomic_set(&epoch->active, 0); is already zero */
23402 if (rv == FE_STILL_LIVE)
23403 rv = FE_RECYCLED;
23404 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23405 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23406 drbd_flush(mdev);
23407
23408 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23409 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23410 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23411 if (epoch)
23412 break;
23413 }
23414
23415 epoch = mdev->current_epoch;
23416 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23417 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23418
23419 D_ASSERT(atomic_read(&epoch->active) == 0);
23420 D_ASSERT(epoch->flags == 0);
23421 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23422 }
23423
23424 epoch->flags = 0;
23425 - atomic_set(&epoch->epoch_size, 0);
23426 + atomic_set_unchecked(&epoch->epoch_size, 0);
23427 atomic_set(&epoch->active, 0);
23428
23429 spin_lock(&mdev->epoch_lock);
23430 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23431 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23432 list_add(&epoch->list, &mdev->current_epoch->list);
23433 mdev->current_epoch = epoch;
23434 mdev->epochs++;
23435 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23436 spin_unlock(&mdev->peer_seq_lock);
23437
23438 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23439 - atomic_inc(&mdev->current_epoch->epoch_size);
23440 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23441 return drbd_drain_block(mdev, data_size);
23442 }
23443
23444 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23445
23446 spin_lock(&mdev->epoch_lock);
23447 e->epoch = mdev->current_epoch;
23448 - atomic_inc(&e->epoch->epoch_size);
23449 + atomic_inc_unchecked(&e->epoch->epoch_size);
23450 atomic_inc(&e->epoch->active);
23451 spin_unlock(&mdev->epoch_lock);
23452
23453 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23454 D_ASSERT(list_empty(&mdev->done_ee));
23455
23456 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23457 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23458 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23459 D_ASSERT(list_empty(&mdev->current_epoch->list));
23460 }
23461
23462 diff -urNp linux-3.0.3/drivers/block/nbd.c linux-3.0.3/drivers/block/nbd.c
23463 --- linux-3.0.3/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23464 +++ linux-3.0.3/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23465 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23466 struct kvec iov;
23467 sigset_t blocked, oldset;
23468
23469 + pax_track_stack();
23470 +
23471 if (unlikely(!sock)) {
23472 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23473 lo->disk->disk_name, (send ? "send" : "recv"));
23474 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23475 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23476 unsigned int cmd, unsigned long arg)
23477 {
23478 + pax_track_stack();
23479 +
23480 switch (cmd) {
23481 case NBD_DISCONNECT: {
23482 struct request sreq;
23483 diff -urNp linux-3.0.3/drivers/char/agp/frontend.c linux-3.0.3/drivers/char/agp/frontend.c
23484 --- linux-3.0.3/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23485 +++ linux-3.0.3/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23486 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23487 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23488 return -EFAULT;
23489
23490 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23491 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23492 return -EFAULT;
23493
23494 client = agp_find_client_by_pid(reserve.pid);
23495 diff -urNp linux-3.0.3/drivers/char/briq_panel.c linux-3.0.3/drivers/char/briq_panel.c
23496 --- linux-3.0.3/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23497 +++ linux-3.0.3/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23498 @@ -9,6 +9,7 @@
23499 #include <linux/types.h>
23500 #include <linux/errno.h>
23501 #include <linux/tty.h>
23502 +#include <linux/mutex.h>
23503 #include <linux/timer.h>
23504 #include <linux/kernel.h>
23505 #include <linux/wait.h>
23506 @@ -34,6 +35,7 @@ static int vfd_is_open;
23507 static unsigned char vfd[40];
23508 static int vfd_cursor;
23509 static unsigned char ledpb, led;
23510 +static DEFINE_MUTEX(vfd_mutex);
23511
23512 static void update_vfd(void)
23513 {
23514 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23515 if (!vfd_is_open)
23516 return -EBUSY;
23517
23518 + mutex_lock(&vfd_mutex);
23519 for (;;) {
23520 char c;
23521 if (!indx)
23522 break;
23523 - if (get_user(c, buf))
23524 + if (get_user(c, buf)) {
23525 + mutex_unlock(&vfd_mutex);
23526 return -EFAULT;
23527 + }
23528 if (esc) {
23529 set_led(c);
23530 esc = 0;
23531 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23532 buf++;
23533 }
23534 update_vfd();
23535 + mutex_unlock(&vfd_mutex);
23536
23537 return len;
23538 }
23539 diff -urNp linux-3.0.3/drivers/char/genrtc.c linux-3.0.3/drivers/char/genrtc.c
23540 --- linux-3.0.3/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23541 +++ linux-3.0.3/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23542 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23543 switch (cmd) {
23544
23545 case RTC_PLL_GET:
23546 + memset(&pll, 0, sizeof(pll));
23547 if (get_rtc_pll(&pll))
23548 return -EINVAL;
23549 else
23550 diff -urNp linux-3.0.3/drivers/char/hpet.c linux-3.0.3/drivers/char/hpet.c
23551 --- linux-3.0.3/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23552 +++ linux-3.0.3/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23553 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23554 }
23555
23556 static int
23557 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23558 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23559 struct hpet_info *info)
23560 {
23561 struct hpet_timer __iomem *timer;
23562 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c
23563 --- linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23564 +++ linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23565 @@ -415,7 +415,7 @@ struct ipmi_smi {
23566 struct proc_dir_entry *proc_dir;
23567 char proc_dir_name[10];
23568
23569 - atomic_t stats[IPMI_NUM_STATS];
23570 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23571
23572 /*
23573 * run_to_completion duplicate of smb_info, smi_info
23574 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23575
23576
23577 #define ipmi_inc_stat(intf, stat) \
23578 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23579 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23580 #define ipmi_get_stat(intf, stat) \
23581 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23582 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23583
23584 static int is_lan_addr(struct ipmi_addr *addr)
23585 {
23586 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23587 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23588 init_waitqueue_head(&intf->waitq);
23589 for (i = 0; i < IPMI_NUM_STATS; i++)
23590 - atomic_set(&intf->stats[i], 0);
23591 + atomic_set_unchecked(&intf->stats[i], 0);
23592
23593 intf->proc_dir = NULL;
23594
23595 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23596 struct ipmi_smi_msg smi_msg;
23597 struct ipmi_recv_msg recv_msg;
23598
23599 + pax_track_stack();
23600 +
23601 si = (struct ipmi_system_interface_addr *) &addr;
23602 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23603 si->channel = IPMI_BMC_CHANNEL;
23604 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c
23605 --- linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23606 +++ linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23607 @@ -277,7 +277,7 @@ struct smi_info {
23608 unsigned char slave_addr;
23609
23610 /* Counters and things for the proc filesystem. */
23611 - atomic_t stats[SI_NUM_STATS];
23612 + atomic_unchecked_t stats[SI_NUM_STATS];
23613
23614 struct task_struct *thread;
23615
23616 @@ -286,9 +286,9 @@ struct smi_info {
23617 };
23618
23619 #define smi_inc_stat(smi, stat) \
23620 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23621 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23622 #define smi_get_stat(smi, stat) \
23623 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23624 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23625
23626 #define SI_MAX_PARMS 4
23627
23628 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23629 atomic_set(&new_smi->req_events, 0);
23630 new_smi->run_to_completion = 0;
23631 for (i = 0; i < SI_NUM_STATS; i++)
23632 - atomic_set(&new_smi->stats[i], 0);
23633 + atomic_set_unchecked(&new_smi->stats[i], 0);
23634
23635 new_smi->interrupt_disabled = 1;
23636 atomic_set(&new_smi->stop_operation, 0);
23637 diff -urNp linux-3.0.3/drivers/char/Kconfig linux-3.0.3/drivers/char/Kconfig
23638 --- linux-3.0.3/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23639 +++ linux-3.0.3/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23640 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23641
23642 config DEVKMEM
23643 bool "/dev/kmem virtual device support"
23644 - default y
23645 + default n
23646 + depends on !GRKERNSEC_KMEM
23647 help
23648 Say Y here if you want to support the /dev/kmem device. The
23649 /dev/kmem device is rarely used, but can be used for certain
23650 @@ -596,6 +597,7 @@ config DEVPORT
23651 bool
23652 depends on !M68K
23653 depends on ISA || PCI
23654 + depends on !GRKERNSEC_KMEM
23655 default y
23656
23657 source "drivers/s390/char/Kconfig"
23658 diff -urNp linux-3.0.3/drivers/char/mem.c linux-3.0.3/drivers/char/mem.c
23659 --- linux-3.0.3/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23660 +++ linux-3.0.3/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23661 @@ -18,6 +18,7 @@
23662 #include <linux/raw.h>
23663 #include <linux/tty.h>
23664 #include <linux/capability.h>
23665 +#include <linux/security.h>
23666 #include <linux/ptrace.h>
23667 #include <linux/device.h>
23668 #include <linux/highmem.h>
23669 @@ -34,6 +35,10 @@
23670 # include <linux/efi.h>
23671 #endif
23672
23673 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23674 +extern struct file_operations grsec_fops;
23675 +#endif
23676 +
23677 static inline unsigned long size_inside_page(unsigned long start,
23678 unsigned long size)
23679 {
23680 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23681
23682 while (cursor < to) {
23683 if (!devmem_is_allowed(pfn)) {
23684 +#ifdef CONFIG_GRKERNSEC_KMEM
23685 + gr_handle_mem_readwrite(from, to);
23686 +#else
23687 printk(KERN_INFO
23688 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23689 current->comm, from, to);
23690 +#endif
23691 return 0;
23692 }
23693 cursor += PAGE_SIZE;
23694 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23695 }
23696 return 1;
23697 }
23698 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23699 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23700 +{
23701 + return 0;
23702 +}
23703 #else
23704 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23705 {
23706 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23707
23708 while (count > 0) {
23709 unsigned long remaining;
23710 + char *temp;
23711
23712 sz = size_inside_page(p, count);
23713
23714 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23715 if (!ptr)
23716 return -EFAULT;
23717
23718 - remaining = copy_to_user(buf, ptr, sz);
23719 +#ifdef CONFIG_PAX_USERCOPY
23720 + temp = kmalloc(sz, GFP_KERNEL);
23721 + if (!temp) {
23722 + unxlate_dev_mem_ptr(p, ptr);
23723 + return -ENOMEM;
23724 + }
23725 + memcpy(temp, ptr, sz);
23726 +#else
23727 + temp = ptr;
23728 +#endif
23729 +
23730 + remaining = copy_to_user(buf, temp, sz);
23731 +
23732 +#ifdef CONFIG_PAX_USERCOPY
23733 + kfree(temp);
23734 +#endif
23735 +
23736 unxlate_dev_mem_ptr(p, ptr);
23737 if (remaining)
23738 return -EFAULT;
23739 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23740 size_t count, loff_t *ppos)
23741 {
23742 unsigned long p = *ppos;
23743 - ssize_t low_count, read, sz;
23744 + ssize_t low_count, read, sz, err = 0;
23745 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23746 - int err = 0;
23747
23748 read = 0;
23749 if (p < (unsigned long) high_memory) {
23750 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23751 }
23752 #endif
23753 while (low_count > 0) {
23754 + char *temp;
23755 +
23756 sz = size_inside_page(p, low_count);
23757
23758 /*
23759 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23760 */
23761 kbuf = xlate_dev_kmem_ptr((char *)p);
23762
23763 - if (copy_to_user(buf, kbuf, sz))
23764 +#ifdef CONFIG_PAX_USERCOPY
23765 + temp = kmalloc(sz, GFP_KERNEL);
23766 + if (!temp)
23767 + return -ENOMEM;
23768 + memcpy(temp, kbuf, sz);
23769 +#else
23770 + temp = kbuf;
23771 +#endif
23772 +
23773 + err = copy_to_user(buf, temp, sz);
23774 +
23775 +#ifdef CONFIG_PAX_USERCOPY
23776 + kfree(temp);
23777 +#endif
23778 +
23779 + if (err)
23780 return -EFAULT;
23781 buf += sz;
23782 p += sz;
23783 @@ -866,6 +913,9 @@ static const struct memdev {
23784 #ifdef CONFIG_CRASH_DUMP
23785 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23786 #endif
23787 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23788 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23789 +#endif
23790 };
23791
23792 static int memory_open(struct inode *inode, struct file *filp)
23793 diff -urNp linux-3.0.3/drivers/char/nvram.c linux-3.0.3/drivers/char/nvram.c
23794 --- linux-3.0.3/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23795 +++ linux-3.0.3/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23796 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23797
23798 spin_unlock_irq(&rtc_lock);
23799
23800 - if (copy_to_user(buf, contents, tmp - contents))
23801 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23802 return -EFAULT;
23803
23804 *ppos = i;
23805 diff -urNp linux-3.0.3/drivers/char/random.c linux-3.0.3/drivers/char/random.c
23806 --- linux-3.0.3/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23807 +++ linux-3.0.3/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23808 @@ -261,8 +261,13 @@
23809 /*
23810 * Configuration information
23811 */
23812 +#ifdef CONFIG_GRKERNSEC_RANDNET
23813 +#define INPUT_POOL_WORDS 512
23814 +#define OUTPUT_POOL_WORDS 128
23815 +#else
23816 #define INPUT_POOL_WORDS 128
23817 #define OUTPUT_POOL_WORDS 32
23818 +#endif
23819 #define SEC_XFER_SIZE 512
23820 #define EXTRACT_SIZE 10
23821
23822 @@ -300,10 +305,17 @@ static struct poolinfo {
23823 int poolwords;
23824 int tap1, tap2, tap3, tap4, tap5;
23825 } poolinfo_table[] = {
23826 +#ifdef CONFIG_GRKERNSEC_RANDNET
23827 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23828 + { 512, 411, 308, 208, 104, 1 },
23829 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23830 + { 128, 103, 76, 51, 25, 1 },
23831 +#else
23832 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23833 { 128, 103, 76, 51, 25, 1 },
23834 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23835 { 32, 26, 20, 14, 7, 1 },
23836 +#endif
23837 #if 0
23838 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23839 { 2048, 1638, 1231, 819, 411, 1 },
23840 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23841
23842 extract_buf(r, tmp);
23843 i = min_t(int, nbytes, EXTRACT_SIZE);
23844 - if (copy_to_user(buf, tmp, i)) {
23845 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23846 ret = -EFAULT;
23847 break;
23848 }
23849 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23850 #include <linux/sysctl.h>
23851
23852 static int min_read_thresh = 8, min_write_thresh;
23853 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23854 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23855 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23856 static char sysctl_bootid[16];
23857
23858 diff -urNp linux-3.0.3/drivers/char/sonypi.c linux-3.0.3/drivers/char/sonypi.c
23859 --- linux-3.0.3/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23860 +++ linux-3.0.3/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23861 @@ -55,6 +55,7 @@
23862 #include <asm/uaccess.h>
23863 #include <asm/io.h>
23864 #include <asm/system.h>
23865 +#include <asm/local.h>
23866
23867 #include <linux/sonypi.h>
23868
23869 @@ -491,7 +492,7 @@ static struct sonypi_device {
23870 spinlock_t fifo_lock;
23871 wait_queue_head_t fifo_proc_list;
23872 struct fasync_struct *fifo_async;
23873 - int open_count;
23874 + local_t open_count;
23875 int model;
23876 struct input_dev *input_jog_dev;
23877 struct input_dev *input_key_dev;
23878 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23879 static int sonypi_misc_release(struct inode *inode, struct file *file)
23880 {
23881 mutex_lock(&sonypi_device.lock);
23882 - sonypi_device.open_count--;
23883 + local_dec(&sonypi_device.open_count);
23884 mutex_unlock(&sonypi_device.lock);
23885 return 0;
23886 }
23887 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23888 {
23889 mutex_lock(&sonypi_device.lock);
23890 /* Flush input queue on first open */
23891 - if (!sonypi_device.open_count)
23892 + if (!local_read(&sonypi_device.open_count))
23893 kfifo_reset(&sonypi_device.fifo);
23894 - sonypi_device.open_count++;
23895 + local_inc(&sonypi_device.open_count);
23896 mutex_unlock(&sonypi_device.lock);
23897
23898 return 0;
23899 diff -urNp linux-3.0.3/drivers/char/tpm/tpm_bios.c linux-3.0.3/drivers/char/tpm/tpm_bios.c
23900 --- linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23901 +++ linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23902 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23903 event = addr;
23904
23905 if ((event->event_type == 0 && event->event_size == 0) ||
23906 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23907 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
23908 return NULL;
23909
23910 return addr;
23911 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
23912 return NULL;
23913
23914 if ((event->event_type == 0 && event->event_size == 0) ||
23915 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
23916 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
23917 return NULL;
23918
23919 (*pos)++;
23920 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
23921 int i;
23922
23923 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
23924 - seq_putc(m, data[i]);
23925 + if (!seq_putc(m, data[i]))
23926 + return -EFAULT;
23927
23928 return 0;
23929 }
23930 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
23931 log->bios_event_log_end = log->bios_event_log + len;
23932
23933 virt = acpi_os_map_memory(start, len);
23934 + if (!virt) {
23935 + kfree(log->bios_event_log);
23936 + log->bios_event_log = NULL;
23937 + return -EFAULT;
23938 + }
23939
23940 memcpy(log->bios_event_log, virt, len);
23941
23942 diff -urNp linux-3.0.3/drivers/char/tpm/tpm.c linux-3.0.3/drivers/char/tpm/tpm.c
23943 --- linux-3.0.3/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
23944 +++ linux-3.0.3/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
23945 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
23946 chip->vendor.req_complete_val)
23947 goto out_recv;
23948
23949 - if ((status == chip->vendor.req_canceled)) {
23950 + if (status == chip->vendor.req_canceled) {
23951 dev_err(chip->dev, "Operation Canceled\n");
23952 rc = -ECANCELED;
23953 goto out;
23954 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
23955
23956 struct tpm_chip *chip = dev_get_drvdata(dev);
23957
23958 + pax_track_stack();
23959 +
23960 tpm_cmd.header.in = tpm_readpubek_header;
23961 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
23962 "attempting to read the PUBEK");
23963 diff -urNp linux-3.0.3/drivers/crypto/hifn_795x.c linux-3.0.3/drivers/crypto/hifn_795x.c
23964 --- linux-3.0.3/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
23965 +++ linux-3.0.3/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
23966 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
23967 0xCA, 0x34, 0x2B, 0x2E};
23968 struct scatterlist sg;
23969
23970 + pax_track_stack();
23971 +
23972 memset(src, 0, sizeof(src));
23973 memset(ctx.key, 0, sizeof(ctx.key));
23974
23975 diff -urNp linux-3.0.3/drivers/crypto/padlock-aes.c linux-3.0.3/drivers/crypto/padlock-aes.c
23976 --- linux-3.0.3/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
23977 +++ linux-3.0.3/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
23978 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
23979 struct crypto_aes_ctx gen_aes;
23980 int cpu;
23981
23982 + pax_track_stack();
23983 +
23984 if (key_len % 8) {
23985 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
23986 return -EINVAL;
23987 diff -urNp linux-3.0.3/drivers/edac/edac_pci_sysfs.c linux-3.0.3/drivers/edac/edac_pci_sysfs.c
23988 --- linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
23989 +++ linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
23990 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
23991 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
23992 static int edac_pci_poll_msec = 1000; /* one second workq period */
23993
23994 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
23995 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
23996 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
23997 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
23998
23999 static struct kobject *edac_pci_top_main_kobj;
24000 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24001 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24002 edac_printk(KERN_CRIT, EDAC_PCI,
24003 "Signaled System Error on %s\n",
24004 pci_name(dev));
24005 - atomic_inc(&pci_nonparity_count);
24006 + atomic_inc_unchecked(&pci_nonparity_count);
24007 }
24008
24009 if (status & (PCI_STATUS_PARITY)) {
24010 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24011 "Master Data Parity Error on %s\n",
24012 pci_name(dev));
24013
24014 - atomic_inc(&pci_parity_count);
24015 + atomic_inc_unchecked(&pci_parity_count);
24016 }
24017
24018 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24019 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24020 "Detected Parity Error on %s\n",
24021 pci_name(dev));
24022
24023 - atomic_inc(&pci_parity_count);
24024 + atomic_inc_unchecked(&pci_parity_count);
24025 }
24026 }
24027
24028 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24029 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24030 "Signaled System Error on %s\n",
24031 pci_name(dev));
24032 - atomic_inc(&pci_nonparity_count);
24033 + atomic_inc_unchecked(&pci_nonparity_count);
24034 }
24035
24036 if (status & (PCI_STATUS_PARITY)) {
24037 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24038 "Master Data Parity Error on "
24039 "%s\n", pci_name(dev));
24040
24041 - atomic_inc(&pci_parity_count);
24042 + atomic_inc_unchecked(&pci_parity_count);
24043 }
24044
24045 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24046 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24047 "Detected Parity Error on %s\n",
24048 pci_name(dev));
24049
24050 - atomic_inc(&pci_parity_count);
24051 + atomic_inc_unchecked(&pci_parity_count);
24052 }
24053 }
24054 }
24055 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24056 if (!check_pci_errors)
24057 return;
24058
24059 - before_count = atomic_read(&pci_parity_count);
24060 + before_count = atomic_read_unchecked(&pci_parity_count);
24061
24062 /* scan all PCI devices looking for a Parity Error on devices and
24063 * bridges.
24064 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24065 /* Only if operator has selected panic on PCI Error */
24066 if (edac_pci_get_panic_on_pe()) {
24067 /* If the count is different 'after' from 'before' */
24068 - if (before_count != atomic_read(&pci_parity_count))
24069 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24070 panic("EDAC: PCI Parity Error");
24071 }
24072 }
24073 diff -urNp linux-3.0.3/drivers/edac/i7core_edac.c linux-3.0.3/drivers/edac/i7core_edac.c
24074 --- linux-3.0.3/drivers/edac/i7core_edac.c 2011-07-21 22:17:23.000000000 -0400
24075 +++ linux-3.0.3/drivers/edac/i7core_edac.c 2011-08-23 21:47:55.000000000 -0400
24076 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24077 char *type, *optype, *err, *msg;
24078 unsigned long error = m->status & 0x1ff0000l;
24079 u32 optypenum = (m->status >> 4) & 0x07;
24080 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24081 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24082 u32 dimm = (m->misc >> 16) & 0x3;
24083 u32 channel = (m->misc >> 18) & 0x3;
24084 u32 syndrome = m->misc >> 32;
24085 diff -urNp linux-3.0.3/drivers/edac/mce_amd.h linux-3.0.3/drivers/edac/mce_amd.h
24086 --- linux-3.0.3/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24087 +++ linux-3.0.3/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24088 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24089 bool (*dc_mce)(u16, u8);
24090 bool (*ic_mce)(u16, u8);
24091 bool (*nb_mce)(u16, u8);
24092 -};
24093 +} __no_const;
24094
24095 void amd_report_gart_errors(bool);
24096 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24097 diff -urNp linux-3.0.3/drivers/firewire/core-card.c linux-3.0.3/drivers/firewire/core-card.c
24098 --- linux-3.0.3/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24099 +++ linux-3.0.3/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24100 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24101
24102 void fw_core_remove_card(struct fw_card *card)
24103 {
24104 - struct fw_card_driver dummy_driver = dummy_driver_template;
24105 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24106
24107 card->driver->update_phy_reg(card, 4,
24108 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24109 diff -urNp linux-3.0.3/drivers/firewire/core-cdev.c linux-3.0.3/drivers/firewire/core-cdev.c
24110 --- linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24111 +++ linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24112 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24113 int ret;
24114
24115 if ((request->channels == 0 && request->bandwidth == 0) ||
24116 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24117 - request->bandwidth < 0)
24118 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24119 return -EINVAL;
24120
24121 r = kmalloc(sizeof(*r), GFP_KERNEL);
24122 diff -urNp linux-3.0.3/drivers/firewire/core.h linux-3.0.3/drivers/firewire/core.h
24123 --- linux-3.0.3/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24124 +++ linux-3.0.3/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24125 @@ -101,6 +101,7 @@ struct fw_card_driver {
24126
24127 int (*stop_iso)(struct fw_iso_context *ctx);
24128 };
24129 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24130
24131 void fw_card_initialize(struct fw_card *card,
24132 const struct fw_card_driver *driver, struct device *device);
24133 diff -urNp linux-3.0.3/drivers/firewire/core-transaction.c linux-3.0.3/drivers/firewire/core-transaction.c
24134 --- linux-3.0.3/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24135 +++ linux-3.0.3/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24136 @@ -37,6 +37,7 @@
24137 #include <linux/timer.h>
24138 #include <linux/types.h>
24139 #include <linux/workqueue.h>
24140 +#include <linux/sched.h>
24141
24142 #include <asm/byteorder.h>
24143
24144 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24145 struct transaction_callback_data d;
24146 struct fw_transaction t;
24147
24148 + pax_track_stack();
24149 +
24150 init_timer_on_stack(&t.split_timeout_timer);
24151 init_completion(&d.done);
24152 d.payload = payload;
24153 diff -urNp linux-3.0.3/drivers/firmware/dmi_scan.c linux-3.0.3/drivers/firmware/dmi_scan.c
24154 --- linux-3.0.3/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24155 +++ linux-3.0.3/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24156 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24157 }
24158 }
24159 else {
24160 - /*
24161 - * no iounmap() for that ioremap(); it would be a no-op, but
24162 - * it's so early in setup that sucker gets confused into doing
24163 - * what it shouldn't if we actually call it.
24164 - */
24165 p = dmi_ioremap(0xF0000, 0x10000);
24166 if (p == NULL)
24167 goto error;
24168 diff -urNp linux-3.0.3/drivers/gpio/vr41xx_giu.c linux-3.0.3/drivers/gpio/vr41xx_giu.c
24169 --- linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24170 +++ linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24171 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24172 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24173 maskl, pendl, maskh, pendh);
24174
24175 - atomic_inc(&irq_err_count);
24176 + atomic_inc_unchecked(&irq_err_count);
24177
24178 return -EINVAL;
24179 }
24180 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c
24181 --- linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24182 +++ linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24183 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24184 struct drm_crtc *tmp;
24185 int crtc_mask = 1;
24186
24187 - WARN(!crtc, "checking null crtc?\n");
24188 + BUG_ON(!crtc);
24189
24190 dev = crtc->dev;
24191
24192 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24193 struct drm_encoder *encoder;
24194 bool ret = true;
24195
24196 + pax_track_stack();
24197 +
24198 crtc->enabled = drm_helper_crtc_in_use(crtc);
24199 if (!crtc->enabled)
24200 return true;
24201 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_drv.c linux-3.0.3/drivers/gpu/drm/drm_drv.c
24202 --- linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24203 +++ linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24204 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24205
24206 dev = file_priv->minor->dev;
24207 atomic_inc(&dev->ioctl_count);
24208 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24209 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24210 ++file_priv->ioctl_count;
24211
24212 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24213 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_fops.c linux-3.0.3/drivers/gpu/drm/drm_fops.c
24214 --- linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24215 +++ linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24216 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24217 }
24218
24219 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24220 - atomic_set(&dev->counts[i], 0);
24221 + atomic_set_unchecked(&dev->counts[i], 0);
24222
24223 dev->sigdata.lock = NULL;
24224
24225 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24226
24227 retcode = drm_open_helper(inode, filp, dev);
24228 if (!retcode) {
24229 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24230 - if (!dev->open_count++)
24231 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24232 + if (local_inc_return(&dev->open_count) == 1)
24233 retcode = drm_setup(dev);
24234 }
24235 if (!retcode) {
24236 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24237
24238 mutex_lock(&drm_global_mutex);
24239
24240 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24241 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24242
24243 if (dev->driver->preclose)
24244 dev->driver->preclose(dev, file_priv);
24245 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24246 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24247 task_pid_nr(current),
24248 (long)old_encode_dev(file_priv->minor->device),
24249 - dev->open_count);
24250 + local_read(&dev->open_count));
24251
24252 /* if the master has gone away we can't do anything with the lock */
24253 if (file_priv->minor->master)
24254 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24255 * End inline drm_release
24256 */
24257
24258 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24259 - if (!--dev->open_count) {
24260 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24261 + if (local_dec_and_test(&dev->open_count)) {
24262 if (atomic_read(&dev->ioctl_count)) {
24263 DRM_ERROR("Device busy: %d\n",
24264 atomic_read(&dev->ioctl_count));
24265 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_global.c linux-3.0.3/drivers/gpu/drm/drm_global.c
24266 --- linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24267 +++ linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24268 @@ -36,7 +36,7 @@
24269 struct drm_global_item {
24270 struct mutex mutex;
24271 void *object;
24272 - int refcount;
24273 + atomic_t refcount;
24274 };
24275
24276 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24277 @@ -49,7 +49,7 @@ void drm_global_init(void)
24278 struct drm_global_item *item = &glob[i];
24279 mutex_init(&item->mutex);
24280 item->object = NULL;
24281 - item->refcount = 0;
24282 + atomic_set(&item->refcount, 0);
24283 }
24284 }
24285
24286 @@ -59,7 +59,7 @@ void drm_global_release(void)
24287 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24288 struct drm_global_item *item = &glob[i];
24289 BUG_ON(item->object != NULL);
24290 - BUG_ON(item->refcount != 0);
24291 + BUG_ON(atomic_read(&item->refcount) != 0);
24292 }
24293 }
24294
24295 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24296 void *object;
24297
24298 mutex_lock(&item->mutex);
24299 - if (item->refcount == 0) {
24300 + if (atomic_read(&item->refcount) == 0) {
24301 item->object = kzalloc(ref->size, GFP_KERNEL);
24302 if (unlikely(item->object == NULL)) {
24303 ret = -ENOMEM;
24304 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24305 goto out_err;
24306
24307 }
24308 - ++item->refcount;
24309 + atomic_inc(&item->refcount);
24310 ref->object = item->object;
24311 object = item->object;
24312 mutex_unlock(&item->mutex);
24313 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24314 struct drm_global_item *item = &glob[ref->global_type];
24315
24316 mutex_lock(&item->mutex);
24317 - BUG_ON(item->refcount == 0);
24318 + BUG_ON(atomic_read(&item->refcount) == 0);
24319 BUG_ON(ref->object != item->object);
24320 - if (--item->refcount == 0) {
24321 + if (atomic_dec_and_test(&item->refcount)) {
24322 ref->release(ref);
24323 item->object = NULL;
24324 }
24325 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_info.c linux-3.0.3/drivers/gpu/drm/drm_info.c
24326 --- linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24327 +++ linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24328 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24329 struct drm_local_map *map;
24330 struct drm_map_list *r_list;
24331
24332 - /* Hardcoded from _DRM_FRAME_BUFFER,
24333 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24334 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24335 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24336 + static const char * const types[] = {
24337 + [_DRM_FRAME_BUFFER] = "FB",
24338 + [_DRM_REGISTERS] = "REG",
24339 + [_DRM_SHM] = "SHM",
24340 + [_DRM_AGP] = "AGP",
24341 + [_DRM_SCATTER_GATHER] = "SG",
24342 + [_DRM_CONSISTENT] = "PCI",
24343 + [_DRM_GEM] = "GEM" };
24344 const char *type;
24345 int i;
24346
24347 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24348 map = r_list->map;
24349 if (!map)
24350 continue;
24351 - if (map->type < 0 || map->type > 5)
24352 + if (map->type >= ARRAY_SIZE(types))
24353 type = "??";
24354 else
24355 type = types[map->type];
24356 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24357 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24358 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24359 vma->vm_flags & VM_IO ? 'i' : '-',
24360 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24361 + 0);
24362 +#else
24363 vma->vm_pgoff);
24364 +#endif
24365
24366 #if defined(__i386__)
24367 pgprot = pgprot_val(vma->vm_page_prot);
24368 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_ioctl.c linux-3.0.3/drivers/gpu/drm/drm_ioctl.c
24369 --- linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24370 +++ linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24371 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24372 stats->data[i].value =
24373 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24374 else
24375 - stats->data[i].value = atomic_read(&dev->counts[i]);
24376 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24377 stats->data[i].type = dev->types[i];
24378 }
24379
24380 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_lock.c linux-3.0.3/drivers/gpu/drm/drm_lock.c
24381 --- linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24382 +++ linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24383 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24384 if (drm_lock_take(&master->lock, lock->context)) {
24385 master->lock.file_priv = file_priv;
24386 master->lock.lock_time = jiffies;
24387 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24388 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24389 break; /* Got lock */
24390 }
24391
24392 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24393 return -EINVAL;
24394 }
24395
24396 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24397 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24398
24399 if (drm_lock_free(&master->lock, lock->context)) {
24400 /* FIXME: Should really bail out here. */
24401 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c
24402 --- linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24403 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24404 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24405 dma->buflist[vertex->idx],
24406 vertex->discard, vertex->used);
24407
24408 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24409 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24410 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24411 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24412 sarea_priv->last_enqueue = dev_priv->counter - 1;
24413 sarea_priv->last_dispatch = (int)hw_status[5];
24414
24415 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24416 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24417 mc->last_render);
24418
24419 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24420 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24421 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24422 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24423 sarea_priv->last_enqueue = dev_priv->counter - 1;
24424 sarea_priv->last_dispatch = (int)hw_status[5];
24425
24426 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h
24427 --- linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24428 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24429 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24430 int page_flipping;
24431
24432 wait_queue_head_t irq_queue;
24433 - atomic_t irq_received;
24434 - atomic_t irq_emitted;
24435 + atomic_unchecked_t irq_received;
24436 + atomic_unchecked_t irq_emitted;
24437
24438 int front_offset;
24439 } drm_i810_private_t;
24440 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c
24441 --- linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24442 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24443 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24444 I915_READ(GTIMR));
24445 }
24446 seq_printf(m, "Interrupts received: %d\n",
24447 - atomic_read(&dev_priv->irq_received));
24448 + atomic_read_unchecked(&dev_priv->irq_received));
24449 for (i = 0; i < I915_NUM_RINGS; i++) {
24450 if (IS_GEN6(dev)) {
24451 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24452 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c
24453 --- linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24454 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24455 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24456 bool can_switch;
24457
24458 spin_lock(&dev->count_lock);
24459 - can_switch = (dev->open_count == 0);
24460 + can_switch = (local_read(&dev->open_count) == 0);
24461 spin_unlock(&dev->count_lock);
24462 return can_switch;
24463 }
24464 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h
24465 --- linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24466 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24467 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24468 /* render clock increase/decrease */
24469 /* display clock increase/decrease */
24470 /* pll clock increase/decrease */
24471 -};
24472 +} __no_const;
24473
24474 struct intel_device_info {
24475 u8 gen;
24476 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24477 int current_page;
24478 int page_flipping;
24479
24480 - atomic_t irq_received;
24481 + atomic_unchecked_t irq_received;
24482
24483 /* protects the irq masks */
24484 spinlock_t irq_lock;
24485 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24486 * will be page flipped away on the next vblank. When it
24487 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24488 */
24489 - atomic_t pending_flip;
24490 + atomic_unchecked_t pending_flip;
24491 };
24492
24493 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24494 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24495 extern void intel_teardown_gmbus(struct drm_device *dev);
24496 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24497 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24498 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24499 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24500 {
24501 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24502 }
24503 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24504 --- linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24505 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24506 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24507 i915_gem_clflush_object(obj);
24508
24509 if (obj->base.pending_write_domain)
24510 - cd->flips |= atomic_read(&obj->pending_flip);
24511 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24512
24513 /* The actual obj->write_domain will be updated with
24514 * pending_write_domain after we emit the accumulated flush for all
24515 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c
24516 --- linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24517 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24518 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24519 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24520 struct drm_i915_master_private *master_priv;
24521
24522 - atomic_inc(&dev_priv->irq_received);
24523 + atomic_inc_unchecked(&dev_priv->irq_received);
24524
24525 /* disable master interrupt before clearing iir */
24526 de_ier = I915_READ(DEIER);
24527 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24528 struct drm_i915_master_private *master_priv;
24529 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24530
24531 - atomic_inc(&dev_priv->irq_received);
24532 + atomic_inc_unchecked(&dev_priv->irq_received);
24533
24534 if (IS_GEN6(dev))
24535 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24536 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24537 int ret = IRQ_NONE, pipe;
24538 bool blc_event = false;
24539
24540 - atomic_inc(&dev_priv->irq_received);
24541 + atomic_inc_unchecked(&dev_priv->irq_received);
24542
24543 iir = I915_READ(IIR);
24544
24545 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24546 {
24547 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24548
24549 - atomic_set(&dev_priv->irq_received, 0);
24550 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24551
24552 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24553 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24554 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24555 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24556 int pipe;
24557
24558 - atomic_set(&dev_priv->irq_received, 0);
24559 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24560
24561 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24562 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24563 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/intel_display.c linux-3.0.3/drivers/gpu/drm/i915/intel_display.c
24564 --- linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24565 +++ linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24566 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24567
24568 wait_event(dev_priv->pending_flip_queue,
24569 atomic_read(&dev_priv->mm.wedged) ||
24570 - atomic_read(&obj->pending_flip) == 0);
24571 + atomic_read_unchecked(&obj->pending_flip) == 0);
24572
24573 /* Big Hammer, we also need to ensure that any pending
24574 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24575 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24576 obj = to_intel_framebuffer(crtc->fb)->obj;
24577 dev_priv = crtc->dev->dev_private;
24578 wait_event(dev_priv->pending_flip_queue,
24579 - atomic_read(&obj->pending_flip) == 0);
24580 + atomic_read_unchecked(&obj->pending_flip) == 0);
24581 }
24582
24583 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24584 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24585
24586 atomic_clear_mask(1 << intel_crtc->plane,
24587 &obj->pending_flip.counter);
24588 - if (atomic_read(&obj->pending_flip) == 0)
24589 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24590 wake_up(&dev_priv->pending_flip_queue);
24591
24592 schedule_work(&work->work);
24593 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24594 /* Block clients from rendering to the new back buffer until
24595 * the flip occurs and the object is no longer visible.
24596 */
24597 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24598 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24599
24600 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24601 if (ret)
24602 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24603 return 0;
24604
24605 cleanup_pending:
24606 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24607 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24608 cleanup_objs:
24609 drm_gem_object_unreference(&work->old_fb_obj->base);
24610 drm_gem_object_unreference(&obj->base);
24611 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h
24612 --- linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24613 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24614 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24615 u32 clear_cmd;
24616 u32 maccess;
24617
24618 - atomic_t vbl_received; /**< Number of vblanks received. */
24619 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24620 wait_queue_head_t fence_queue;
24621 - atomic_t last_fence_retired;
24622 + atomic_unchecked_t last_fence_retired;
24623 u32 next_fence_to_post;
24624
24625 unsigned int fb_cpp;
24626 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c
24627 --- linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24628 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24629 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24630 if (crtc != 0)
24631 return 0;
24632
24633 - return atomic_read(&dev_priv->vbl_received);
24634 + return atomic_read_unchecked(&dev_priv->vbl_received);
24635 }
24636
24637
24638 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24639 /* VBLANK interrupt */
24640 if (status & MGA_VLINEPEN) {
24641 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24642 - atomic_inc(&dev_priv->vbl_received);
24643 + atomic_inc_unchecked(&dev_priv->vbl_received);
24644 drm_handle_vblank(dev, 0);
24645 handled = 1;
24646 }
24647 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24648 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24649 MGA_WRITE(MGA_PRIMEND, prim_end);
24650
24651 - atomic_inc(&dev_priv->last_fence_retired);
24652 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24653 DRM_WAKEUP(&dev_priv->fence_queue);
24654 handled = 1;
24655 }
24656 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24657 * using fences.
24658 */
24659 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24660 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24661 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24662 - *sequence) <= (1 << 23)));
24663
24664 *sequence = cur_fence;
24665 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c
24666 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24667 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-23 21:47:55.000000000 -0400
24668 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24669 struct bit_table {
24670 const char id;
24671 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24672 -};
24673 +} __no_const;
24674
24675 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24676
24677 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h
24678 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24679 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24680 @@ -227,7 +227,7 @@ struct nouveau_channel {
24681 struct list_head pending;
24682 uint32_t sequence;
24683 uint32_t sequence_ack;
24684 - atomic_t last_sequence_irq;
24685 + atomic_unchecked_t last_sequence_irq;
24686 } fence;
24687
24688 /* DMA push buffer */
24689 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24690 u32 handle, u16 class);
24691 void (*set_tile_region)(struct drm_device *dev, int i);
24692 void (*tlb_flush)(struct drm_device *, int engine);
24693 -};
24694 +} __no_const;
24695
24696 struct nouveau_instmem_engine {
24697 void *priv;
24698 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24699 struct nouveau_mc_engine {
24700 int (*init)(struct drm_device *dev);
24701 void (*takedown)(struct drm_device *dev);
24702 -};
24703 +} __no_const;
24704
24705 struct nouveau_timer_engine {
24706 int (*init)(struct drm_device *dev);
24707 void (*takedown)(struct drm_device *dev);
24708 uint64_t (*read)(struct drm_device *dev);
24709 -};
24710 +} __no_const;
24711
24712 struct nouveau_fb_engine {
24713 int num_tiles;
24714 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24715 void (*put)(struct drm_device *, struct nouveau_mem **);
24716
24717 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24718 -};
24719 +} __no_const;
24720
24721 struct nouveau_engine {
24722 struct nouveau_instmem_engine instmem;
24723 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24724 struct drm_global_reference mem_global_ref;
24725 struct ttm_bo_global_ref bo_global_ref;
24726 struct ttm_bo_device bdev;
24727 - atomic_t validate_sequence;
24728 + atomic_unchecked_t validate_sequence;
24729 } ttm;
24730
24731 struct {
24732 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c
24733 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24734 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24735 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24736 if (USE_REFCNT(dev))
24737 sequence = nvchan_rd32(chan, 0x48);
24738 else
24739 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24740 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24741
24742 if (chan->fence.sequence_ack == sequence)
24743 goto out;
24744 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24745
24746 INIT_LIST_HEAD(&chan->fence.pending);
24747 spin_lock_init(&chan->fence.lock);
24748 - atomic_set(&chan->fence.last_sequence_irq, 0);
24749 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24750 return 0;
24751 }
24752
24753 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c
24754 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24755 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24756 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24757 int trycnt = 0;
24758 int ret, i;
24759
24760 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24761 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24762 retry:
24763 if (++trycnt > 100000) {
24764 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24765 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c
24766 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24767 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24768 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24769 bool can_switch;
24770
24771 spin_lock(&dev->count_lock);
24772 - can_switch = (dev->open_count == 0);
24773 + can_switch = (local_read(&dev->open_count) == 0);
24774 spin_unlock(&dev->count_lock);
24775 return can_switch;
24776 }
24777 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c
24778 --- linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24779 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24780 @@ -560,7 +560,7 @@ static int
24781 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24782 u32 class, u32 mthd, u32 data)
24783 {
24784 - atomic_set(&chan->fence.last_sequence_irq, data);
24785 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24786 return 0;
24787 }
24788
24789 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c
24790 --- linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24791 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24792 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24793
24794 /* GH: Simple idle check.
24795 */
24796 - atomic_set(&dev_priv->idle_count, 0);
24797 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24798
24799 /* We don't support anything other than bus-mastering ring mode,
24800 * but the ring can be in either AGP or PCI space for the ring
24801 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h
24802 --- linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24803 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24804 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24805 int is_pci;
24806 unsigned long cce_buffers_offset;
24807
24808 - atomic_t idle_count;
24809 + atomic_unchecked_t idle_count;
24810
24811 int page_flipping;
24812 int current_page;
24813 u32 crtc_offset;
24814 u32 crtc_offset_cntl;
24815
24816 - atomic_t vbl_received;
24817 + atomic_unchecked_t vbl_received;
24818
24819 u32 color_fmt;
24820 unsigned int front_offset;
24821 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c
24822 --- linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24823 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24824 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24825 if (crtc != 0)
24826 return 0;
24827
24828 - return atomic_read(&dev_priv->vbl_received);
24829 + return atomic_read_unchecked(&dev_priv->vbl_received);
24830 }
24831
24832 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24833 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24834 /* VBLANK interrupt */
24835 if (status & R128_CRTC_VBLANK_INT) {
24836 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24837 - atomic_inc(&dev_priv->vbl_received);
24838 + atomic_inc_unchecked(&dev_priv->vbl_received);
24839 drm_handle_vblank(dev, 0);
24840 return IRQ_HANDLED;
24841 }
24842 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_state.c linux-3.0.3/drivers/gpu/drm/r128/r128_state.c
24843 --- linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24844 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24845 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24846
24847 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24848 {
24849 - if (atomic_read(&dev_priv->idle_count) == 0)
24850 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24851 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24852 else
24853 - atomic_set(&dev_priv->idle_count, 0);
24854 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24855 }
24856
24857 #endif
24858 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/atom.c linux-3.0.3/drivers/gpu/drm/radeon/atom.c
24859 --- linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24860 +++ linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24861 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24862 char name[512];
24863 int i;
24864
24865 + pax_track_stack();
24866 +
24867 ctx->card = card;
24868 ctx->bios = bios;
24869
24870 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c
24871 --- linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24872 +++ linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24873 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24874 regex_t mask_rex;
24875 regmatch_t match[4];
24876 char buf[1024];
24877 - size_t end;
24878 + long end;
24879 int len;
24880 int done = 0;
24881 int r;
24882 unsigned o;
24883 struct offset *offset;
24884 char last_reg_s[10];
24885 - int last_reg;
24886 + unsigned long last_reg;
24887
24888 if (regcomp
24889 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24890 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c
24891 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24892 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24893 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24894 struct radeon_gpio_rec gpio;
24895 struct radeon_hpd hpd;
24896
24897 + pax_track_stack();
24898 +
24899 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24900 return false;
24901
24902 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c
24903 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24904 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24905 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24906 bool can_switch;
24907
24908 spin_lock(&dev->count_lock);
24909 - can_switch = (dev->open_count == 0);
24910 + can_switch = (local_read(&dev->open_count) == 0);
24911 spin_unlock(&dev->count_lock);
24912 return can_switch;
24913 }
24914 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c
24915 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
24916 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
24917 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
24918 uint32_t post_div;
24919 u32 pll_out_min, pll_out_max;
24920
24921 + pax_track_stack();
24922 +
24923 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
24924 freq = freq * 1000;
24925
24926 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h
24927 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
24928 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
24929 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
24930
24931 /* SW interrupt */
24932 wait_queue_head_t swi_queue;
24933 - atomic_t swi_emitted;
24934 + atomic_unchecked_t swi_emitted;
24935 int vblank_crtc;
24936 uint32_t irq_enable_reg;
24937 uint32_t r500_disp_irq_reg;
24938 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c
24939 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
24940 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
24941 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
24942 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
24943 return 0;
24944 }
24945 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
24946 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
24947 if (!rdev->cp.ready)
24948 /* FIXME: cp is not running assume everythings is done right
24949 * away
24950 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
24951 return r;
24952 }
24953 radeon_fence_write(rdev, 0);
24954 - atomic_set(&rdev->fence_drv.seq, 0);
24955 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
24956 INIT_LIST_HEAD(&rdev->fence_drv.created);
24957 INIT_LIST_HEAD(&rdev->fence_drv.emited);
24958 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
24959 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon.h linux-3.0.3/drivers/gpu/drm/radeon/radeon.h
24960 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
24961 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
24962 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
24963 */
24964 struct radeon_fence_driver {
24965 uint32_t scratch_reg;
24966 - atomic_t seq;
24967 + atomic_unchecked_t seq;
24968 uint32_t last_seq;
24969 unsigned long last_jiffies;
24970 unsigned long last_timeout;
24971 @@ -960,7 +960,7 @@ struct radeon_asic {
24972 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
24973 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
24974 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
24975 -};
24976 +} __no_const;
24977
24978 /*
24979 * Asic structures
24980 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c
24981 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
24982 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
24983 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
24984 request = compat_alloc_user_space(sizeof(*request));
24985 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
24986 || __put_user(req32.param, &request->param)
24987 - || __put_user((void __user *)(unsigned long)req32.value,
24988 + || __put_user((unsigned long)req32.value,
24989 &request->value))
24990 return -EFAULT;
24991
24992 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c
24993 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
24994 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
24995 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
24996 unsigned int ret;
24997 RING_LOCALS;
24998
24999 - atomic_inc(&dev_priv->swi_emitted);
25000 - ret = atomic_read(&dev_priv->swi_emitted);
25001 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25002 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25003
25004 BEGIN_RING(4);
25005 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25006 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25007 drm_radeon_private_t *dev_priv =
25008 (drm_radeon_private_t *) dev->dev_private;
25009
25010 - atomic_set(&dev_priv->swi_emitted, 0);
25011 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25012 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25013
25014 dev->max_vblank_count = 0x001fffff;
25015 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c
25016 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25017 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25018 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25019 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25020 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25021
25022 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25023 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25024 sarea_priv->nbox * sizeof(depth_boxes[0])))
25025 return -EFAULT;
25026
25027 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25028 {
25029 drm_radeon_private_t *dev_priv = dev->dev_private;
25030 drm_radeon_getparam_t *param = data;
25031 - int value;
25032 + int value = 0;
25033
25034 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25035
25036 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c
25037 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25038 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25039 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25040 }
25041 if (unlikely(ttm_vm_ops == NULL)) {
25042 ttm_vm_ops = vma->vm_ops;
25043 - radeon_ttm_vm_ops = *ttm_vm_ops;
25044 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25045 + pax_open_kernel();
25046 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25047 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25048 + pax_close_kernel();
25049 }
25050 vma->vm_ops = &radeon_ttm_vm_ops;
25051 return 0;
25052 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/rs690.c linux-3.0.3/drivers/gpu/drm/radeon/rs690.c
25053 --- linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25054 +++ linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25055 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25056 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25057 rdev->pm.sideport_bandwidth.full)
25058 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25059 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25060 + read_delay_latency.full = dfixed_const(800 * 1000);
25061 read_delay_latency.full = dfixed_div(read_delay_latency,
25062 rdev->pm.igp_sideport_mclk);
25063 + a.full = dfixed_const(370);
25064 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25065 } else {
25066 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25067 rdev->pm.k8_bandwidth.full)
25068 diff -urNp linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
25069 --- linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25070 +++ linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25071 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25072 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25073 struct shrink_control *sc)
25074 {
25075 - static atomic_t start_pool = ATOMIC_INIT(0);
25076 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25077 unsigned i;
25078 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25079 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25080 struct ttm_page_pool *pool;
25081 int shrink_pages = sc->nr_to_scan;
25082
25083 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_drv.h linux-3.0.3/drivers/gpu/drm/via/via_drv.h
25084 --- linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25085 +++ linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25086 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25087 typedef uint32_t maskarray_t[5];
25088
25089 typedef struct drm_via_irq {
25090 - atomic_t irq_received;
25091 + atomic_unchecked_t irq_received;
25092 uint32_t pending_mask;
25093 uint32_t enable_mask;
25094 wait_queue_head_t irq_queue;
25095 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25096 struct timeval last_vblank;
25097 int last_vblank_valid;
25098 unsigned usec_per_vblank;
25099 - atomic_t vbl_received;
25100 + atomic_unchecked_t vbl_received;
25101 drm_via_state_t hc_state;
25102 char pci_buf[VIA_PCI_BUF_SIZE];
25103 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25104 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_irq.c linux-3.0.3/drivers/gpu/drm/via/via_irq.c
25105 --- linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25106 +++ linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25107 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25108 if (crtc != 0)
25109 return 0;
25110
25111 - return atomic_read(&dev_priv->vbl_received);
25112 + return atomic_read_unchecked(&dev_priv->vbl_received);
25113 }
25114
25115 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25116 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25117
25118 status = VIA_READ(VIA_REG_INTERRUPT);
25119 if (status & VIA_IRQ_VBLANK_PENDING) {
25120 - atomic_inc(&dev_priv->vbl_received);
25121 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25122 + atomic_inc_unchecked(&dev_priv->vbl_received);
25123 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25124 do_gettimeofday(&cur_vblank);
25125 if (dev_priv->last_vblank_valid) {
25126 dev_priv->usec_per_vblank =
25127 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25128 dev_priv->last_vblank = cur_vblank;
25129 dev_priv->last_vblank_valid = 1;
25130 }
25131 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25132 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25133 DRM_DEBUG("US per vblank is: %u\n",
25134 dev_priv->usec_per_vblank);
25135 }
25136 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25137
25138 for (i = 0; i < dev_priv->num_irqs; ++i) {
25139 if (status & cur_irq->pending_mask) {
25140 - atomic_inc(&cur_irq->irq_received);
25141 + atomic_inc_unchecked(&cur_irq->irq_received);
25142 DRM_WAKEUP(&cur_irq->irq_queue);
25143 handled = 1;
25144 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25145 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25146 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25147 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25148 masks[irq][4]));
25149 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25150 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25151 } else {
25152 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25153 (((cur_irq_sequence =
25154 - atomic_read(&cur_irq->irq_received)) -
25155 + atomic_read_unchecked(&cur_irq->irq_received)) -
25156 *sequence) <= (1 << 23)));
25157 }
25158 *sequence = cur_irq_sequence;
25159 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25160 }
25161
25162 for (i = 0; i < dev_priv->num_irqs; ++i) {
25163 - atomic_set(&cur_irq->irq_received, 0);
25164 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25165 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25166 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25167 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25168 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25169 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25170 case VIA_IRQ_RELATIVE:
25171 irqwait->request.sequence +=
25172 - atomic_read(&cur_irq->irq_received);
25173 + atomic_read_unchecked(&cur_irq->irq_received);
25174 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25175 case VIA_IRQ_ABSOLUTE:
25176 break;
25177 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25178 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25179 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25180 @@ -240,7 +240,7 @@ struct vmw_private {
25181 * Fencing and IRQs.
25182 */
25183
25184 - atomic_t fence_seq;
25185 + atomic_unchecked_t fence_seq;
25186 wait_queue_head_t fence_queue;
25187 wait_queue_head_t fifo_queue;
25188 atomic_t fence_queue_waiters;
25189 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25190 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25191 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25192 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25193 while (!vmw_lag_lt(queue, us)) {
25194 spin_lock(&queue->lock);
25195 if (list_empty(&queue->head))
25196 - sequence = atomic_read(&dev_priv->fence_seq);
25197 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25198 else {
25199 fence = list_first_entry(&queue->head,
25200 struct vmw_fence, head);
25201 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25202 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25203 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25204 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25205 (unsigned int) min,
25206 (unsigned int) fifo->capabilities);
25207
25208 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25209 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25210 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25211 vmw_fence_queue_init(&fifo->fence_queue);
25212 return vmw_fifo_send_fence(dev_priv, &dummy);
25213 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25214
25215 fm = vmw_fifo_reserve(dev_priv, bytes);
25216 if (unlikely(fm == NULL)) {
25217 - *sequence = atomic_read(&dev_priv->fence_seq);
25218 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25219 ret = -ENOMEM;
25220 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25221 false, 3*HZ);
25222 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25223 }
25224
25225 do {
25226 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25227 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25228 } while (*sequence == 0);
25229
25230 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25231 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25232 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25233 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25234 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25235 * emitted. Then the fence is stale and signaled.
25236 */
25237
25238 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25239 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25240 > VMW_FENCE_WRAP);
25241
25242 return ret;
25243 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25244
25245 if (fifo_idle)
25246 down_read(&fifo_state->rwsem);
25247 - signal_seq = atomic_read(&dev_priv->fence_seq);
25248 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25249 ret = 0;
25250
25251 for (;;) {
25252 diff -urNp linux-3.0.3/drivers/hid/hid-core.c linux-3.0.3/drivers/hid/hid-core.c
25253 --- linux-3.0.3/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25254 +++ linux-3.0.3/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25255 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25256
25257 int hid_add_device(struct hid_device *hdev)
25258 {
25259 - static atomic_t id = ATOMIC_INIT(0);
25260 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25261 int ret;
25262
25263 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25264 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25265 /* XXX hack, any other cleaner solution after the driver core
25266 * is converted to allow more than 20 bytes as the device name? */
25267 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25268 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25269 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25270
25271 hid_debug_register(hdev, dev_name(&hdev->dev));
25272 ret = device_add(&hdev->dev);
25273 diff -urNp linux-3.0.3/drivers/hid/usbhid/hiddev.c linux-3.0.3/drivers/hid/usbhid/hiddev.c
25274 --- linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25275 +++ linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25276 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25277 break;
25278
25279 case HIDIOCAPPLICATION:
25280 - if (arg < 0 || arg >= hid->maxapplication)
25281 + if (arg >= hid->maxapplication)
25282 break;
25283
25284 for (i = 0; i < hid->maxcollection; i++)
25285 diff -urNp linux-3.0.3/drivers/hwmon/acpi_power_meter.c linux-3.0.3/drivers/hwmon/acpi_power_meter.c
25286 --- linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25287 +++ linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25288 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25289 return res;
25290
25291 temp /= 1000;
25292 - if (temp < 0)
25293 - return -EINVAL;
25294
25295 mutex_lock(&resource->lock);
25296 resource->trip[attr->index - 7] = temp;
25297 diff -urNp linux-3.0.3/drivers/hwmon/sht15.c linux-3.0.3/drivers/hwmon/sht15.c
25298 --- linux-3.0.3/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25299 +++ linux-3.0.3/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25300 @@ -166,7 +166,7 @@ struct sht15_data {
25301 int supply_uV;
25302 bool supply_uV_valid;
25303 struct work_struct update_supply_work;
25304 - atomic_t interrupt_handled;
25305 + atomic_unchecked_t interrupt_handled;
25306 };
25307
25308 /**
25309 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25310 return ret;
25311
25312 gpio_direction_input(data->pdata->gpio_data);
25313 - atomic_set(&data->interrupt_handled, 0);
25314 + atomic_set_unchecked(&data->interrupt_handled, 0);
25315
25316 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25317 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25318 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25319 /* Only relevant if the interrupt hasn't occurred. */
25320 - if (!atomic_read(&data->interrupt_handled))
25321 + if (!atomic_read_unchecked(&data->interrupt_handled))
25322 schedule_work(&data->read_work);
25323 }
25324 ret = wait_event_timeout(data->wait_queue,
25325 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25326
25327 /* First disable the interrupt */
25328 disable_irq_nosync(irq);
25329 - atomic_inc(&data->interrupt_handled);
25330 + atomic_inc_unchecked(&data->interrupt_handled);
25331 /* Then schedule a reading work struct */
25332 if (data->state != SHT15_READING_NOTHING)
25333 schedule_work(&data->read_work);
25334 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25335 * If not, then start the interrupt again - care here as could
25336 * have gone low in meantime so verify it hasn't!
25337 */
25338 - atomic_set(&data->interrupt_handled, 0);
25339 + atomic_set_unchecked(&data->interrupt_handled, 0);
25340 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25341 /* If still not occurred or another handler has been scheduled */
25342 if (gpio_get_value(data->pdata->gpio_data)
25343 - || atomic_read(&data->interrupt_handled))
25344 + || atomic_read_unchecked(&data->interrupt_handled))
25345 return;
25346 }
25347
25348 diff -urNp linux-3.0.3/drivers/hwmon/w83791d.c linux-3.0.3/drivers/hwmon/w83791d.c
25349 --- linux-3.0.3/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25350 +++ linux-3.0.3/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25351 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25352 struct i2c_board_info *info);
25353 static int w83791d_remove(struct i2c_client *client);
25354
25355 -static int w83791d_read(struct i2c_client *client, u8 register);
25356 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25357 +static int w83791d_read(struct i2c_client *client, u8 reg);
25358 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25359 static struct w83791d_data *w83791d_update_device(struct device *dev);
25360
25361 #ifdef DEBUG
25362 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c
25363 --- linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25364 +++ linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25365 @@ -43,7 +43,7 @@
25366 extern struct i2c_adapter amd756_smbus;
25367
25368 static struct i2c_adapter *s4882_adapter;
25369 -static struct i2c_algorithm *s4882_algo;
25370 +static i2c_algorithm_no_const *s4882_algo;
25371
25372 /* Wrapper access functions for multiplexed SMBus */
25373 static DEFINE_MUTEX(amd756_lock);
25374 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c
25375 --- linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25376 +++ linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25377 @@ -41,7 +41,7 @@
25378 extern struct i2c_adapter *nforce2_smbus;
25379
25380 static struct i2c_adapter *s4985_adapter;
25381 -static struct i2c_algorithm *s4985_algo;
25382 +static i2c_algorithm_no_const *s4985_algo;
25383
25384 /* Wrapper access functions for multiplexed SMBus */
25385 static DEFINE_MUTEX(nforce2_lock);
25386 diff -urNp linux-3.0.3/drivers/i2c/i2c-mux.c linux-3.0.3/drivers/i2c/i2c-mux.c
25387 --- linux-3.0.3/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25388 +++ linux-3.0.3/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25389 @@ -28,7 +28,7 @@
25390 /* multiplexer per channel data */
25391 struct i2c_mux_priv {
25392 struct i2c_adapter adap;
25393 - struct i2c_algorithm algo;
25394 + i2c_algorithm_no_const algo;
25395
25396 struct i2c_adapter *parent;
25397 void *mux_dev; /* the mux chip/device */
25398 diff -urNp linux-3.0.3/drivers/ide/ide-cd.c linux-3.0.3/drivers/ide/ide-cd.c
25399 --- linux-3.0.3/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25400 +++ linux-3.0.3/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25401 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25402 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25403 if ((unsigned long)buf & alignment
25404 || blk_rq_bytes(rq) & q->dma_pad_mask
25405 - || object_is_on_stack(buf))
25406 + || object_starts_on_stack(buf))
25407 drive->dma = 0;
25408 }
25409 }
25410 diff -urNp linux-3.0.3/drivers/ide/ide-floppy.c linux-3.0.3/drivers/ide/ide-floppy.c
25411 --- linux-3.0.3/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25412 +++ linux-3.0.3/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25413 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25414 u8 pc_buf[256], header_len, desc_cnt;
25415 int i, rc = 1, blocks, length;
25416
25417 + pax_track_stack();
25418 +
25419 ide_debug_log(IDE_DBG_FUNC, "enter");
25420
25421 drive->bios_cyl = 0;
25422 diff -urNp linux-3.0.3/drivers/ide/setup-pci.c linux-3.0.3/drivers/ide/setup-pci.c
25423 --- linux-3.0.3/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25424 +++ linux-3.0.3/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25425 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25426 int ret, i, n_ports = dev2 ? 4 : 2;
25427 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25428
25429 + pax_track_stack();
25430 +
25431 for (i = 0; i < n_ports / 2; i++) {
25432 ret = ide_setup_pci_controller(pdev[i], d, !i);
25433 if (ret < 0)
25434 diff -urNp linux-3.0.3/drivers/infiniband/core/cm.c linux-3.0.3/drivers/infiniband/core/cm.c
25435 --- linux-3.0.3/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25436 +++ linux-3.0.3/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25437 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25438
25439 struct cm_counter_group {
25440 struct kobject obj;
25441 - atomic_long_t counter[CM_ATTR_COUNT];
25442 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25443 };
25444
25445 struct cm_counter_attribute {
25446 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25447 struct ib_mad_send_buf *msg = NULL;
25448 int ret;
25449
25450 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25451 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25452 counter[CM_REQ_COUNTER]);
25453
25454 /* Quick state check to discard duplicate REQs. */
25455 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25456 if (!cm_id_priv)
25457 return;
25458
25459 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25460 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25461 counter[CM_REP_COUNTER]);
25462 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25463 if (ret)
25464 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25465 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25466 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25467 spin_unlock_irq(&cm_id_priv->lock);
25468 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25469 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25470 counter[CM_RTU_COUNTER]);
25471 goto out;
25472 }
25473 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25474 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25475 dreq_msg->local_comm_id);
25476 if (!cm_id_priv) {
25477 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25478 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25479 counter[CM_DREQ_COUNTER]);
25480 cm_issue_drep(work->port, work->mad_recv_wc);
25481 return -EINVAL;
25482 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25483 case IB_CM_MRA_REP_RCVD:
25484 break;
25485 case IB_CM_TIMEWAIT:
25486 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25487 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25488 counter[CM_DREQ_COUNTER]);
25489 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25490 goto unlock;
25491 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25492 cm_free_msg(msg);
25493 goto deref;
25494 case IB_CM_DREQ_RCVD:
25495 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25496 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25497 counter[CM_DREQ_COUNTER]);
25498 goto unlock;
25499 default:
25500 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25501 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25502 cm_id_priv->msg, timeout)) {
25503 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25504 - atomic_long_inc(&work->port->
25505 + atomic_long_inc_unchecked(&work->port->
25506 counter_group[CM_RECV_DUPLICATES].
25507 counter[CM_MRA_COUNTER]);
25508 goto out;
25509 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25510 break;
25511 case IB_CM_MRA_REQ_RCVD:
25512 case IB_CM_MRA_REP_RCVD:
25513 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25514 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25515 counter[CM_MRA_COUNTER]);
25516 /* fall through */
25517 default:
25518 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25519 case IB_CM_LAP_IDLE:
25520 break;
25521 case IB_CM_MRA_LAP_SENT:
25522 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25523 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25524 counter[CM_LAP_COUNTER]);
25525 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25526 goto unlock;
25527 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25528 cm_free_msg(msg);
25529 goto deref;
25530 case IB_CM_LAP_RCVD:
25531 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25532 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25533 counter[CM_LAP_COUNTER]);
25534 goto unlock;
25535 default:
25536 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25537 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25538 if (cur_cm_id_priv) {
25539 spin_unlock_irq(&cm.lock);
25540 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25541 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25542 counter[CM_SIDR_REQ_COUNTER]);
25543 goto out; /* Duplicate message. */
25544 }
25545 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25546 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25547 msg->retries = 1;
25548
25549 - atomic_long_add(1 + msg->retries,
25550 + atomic_long_add_unchecked(1 + msg->retries,
25551 &port->counter_group[CM_XMIT].counter[attr_index]);
25552 if (msg->retries)
25553 - atomic_long_add(msg->retries,
25554 + atomic_long_add_unchecked(msg->retries,
25555 &port->counter_group[CM_XMIT_RETRIES].
25556 counter[attr_index]);
25557
25558 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25559 }
25560
25561 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25562 - atomic_long_inc(&port->counter_group[CM_RECV].
25563 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25564 counter[attr_id - CM_ATTR_ID_OFFSET]);
25565
25566 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25567 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25568 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25569
25570 return sprintf(buf, "%ld\n",
25571 - atomic_long_read(&group->counter[cm_attr->index]));
25572 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25573 }
25574
25575 static const struct sysfs_ops cm_counter_ops = {
25576 diff -urNp linux-3.0.3/drivers/infiniband/core/fmr_pool.c linux-3.0.3/drivers/infiniband/core/fmr_pool.c
25577 --- linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25578 +++ linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25579 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25580
25581 struct task_struct *thread;
25582
25583 - atomic_t req_ser;
25584 - atomic_t flush_ser;
25585 + atomic_unchecked_t req_ser;
25586 + atomic_unchecked_t flush_ser;
25587
25588 wait_queue_head_t force_wait;
25589 };
25590 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25591 struct ib_fmr_pool *pool = pool_ptr;
25592
25593 do {
25594 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25595 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25596 ib_fmr_batch_release(pool);
25597
25598 - atomic_inc(&pool->flush_ser);
25599 + atomic_inc_unchecked(&pool->flush_ser);
25600 wake_up_interruptible(&pool->force_wait);
25601
25602 if (pool->flush_function)
25603 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25604 }
25605
25606 set_current_state(TASK_INTERRUPTIBLE);
25607 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25608 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25609 !kthread_should_stop())
25610 schedule();
25611 __set_current_state(TASK_RUNNING);
25612 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25613 pool->dirty_watermark = params->dirty_watermark;
25614 pool->dirty_len = 0;
25615 spin_lock_init(&pool->pool_lock);
25616 - atomic_set(&pool->req_ser, 0);
25617 - atomic_set(&pool->flush_ser, 0);
25618 + atomic_set_unchecked(&pool->req_ser, 0);
25619 + atomic_set_unchecked(&pool->flush_ser, 0);
25620 init_waitqueue_head(&pool->force_wait);
25621
25622 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25623 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25624 }
25625 spin_unlock_irq(&pool->pool_lock);
25626
25627 - serial = atomic_inc_return(&pool->req_ser);
25628 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25629 wake_up_process(pool->thread);
25630
25631 if (wait_event_interruptible(pool->force_wait,
25632 - atomic_read(&pool->flush_ser) - serial >= 0))
25633 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25634 return -EINTR;
25635
25636 return 0;
25637 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25638 } else {
25639 list_add_tail(&fmr->list, &pool->dirty_list);
25640 if (++pool->dirty_len >= pool->dirty_watermark) {
25641 - atomic_inc(&pool->req_ser);
25642 + atomic_inc_unchecked(&pool->req_ser);
25643 wake_up_process(pool->thread);
25644 }
25645 }
25646 diff -urNp linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c
25647 --- linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25648 +++ linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25649 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25650 int err;
25651 struct fw_ri_tpte tpt;
25652 u32 stag_idx;
25653 - static atomic_t key;
25654 + static atomic_unchecked_t key;
25655
25656 if (c4iw_fatal_error(rdev))
25657 return -EIO;
25658 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25659 &rdev->resource.tpt_fifo_lock);
25660 if (!stag_idx)
25661 return -ENOMEM;
25662 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25663 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25664 }
25665 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25666 __func__, stag_state, type, pdid, stag_idx);
25667 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c
25668 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25669 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25670 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25671 struct infinipath_counters counters;
25672 struct ipath_devdata *dd;
25673
25674 + pax_track_stack();
25675 +
25676 dd = file->f_path.dentry->d_inode->i_private;
25677 dd->ipath_f_read_counters(dd, &counters);
25678
25679 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c
25680 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25681 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25682 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25683 struct ib_atomic_eth *ateth;
25684 struct ipath_ack_entry *e;
25685 u64 vaddr;
25686 - atomic64_t *maddr;
25687 + atomic64_unchecked_t *maddr;
25688 u64 sdata;
25689 u32 rkey;
25690 u8 next;
25691 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25692 IB_ACCESS_REMOTE_ATOMIC)))
25693 goto nack_acc_unlck;
25694 /* Perform atomic OP and save result. */
25695 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25696 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25697 sdata = be64_to_cpu(ateth->swap_data);
25698 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25699 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25700 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25701 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25702 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25703 be64_to_cpu(ateth->compare_data),
25704 sdata);
25705 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c
25706 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25707 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25708 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25709 unsigned long flags;
25710 struct ib_wc wc;
25711 u64 sdata;
25712 - atomic64_t *maddr;
25713 + atomic64_unchecked_t *maddr;
25714 enum ib_wc_status send_status;
25715
25716 /*
25717 @@ -382,11 +382,11 @@ again:
25718 IB_ACCESS_REMOTE_ATOMIC)))
25719 goto acc_err;
25720 /* Perform atomic OP and save result. */
25721 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25722 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25723 sdata = wqe->wr.wr.atomic.compare_add;
25724 *(u64 *) sqp->s_sge.sge.vaddr =
25725 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25726 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25727 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25728 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25729 sdata, wqe->wr.wr.atomic.swap);
25730 goto send_comp;
25731 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.c linux-3.0.3/drivers/infiniband/hw/nes/nes.c
25732 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25733 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25734 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25735 LIST_HEAD(nes_adapter_list);
25736 static LIST_HEAD(nes_dev_list);
25737
25738 -atomic_t qps_destroyed;
25739 +atomic_unchecked_t qps_destroyed;
25740
25741 static unsigned int ee_flsh_adapter;
25742 static unsigned int sysfs_nonidx_addr;
25743 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25744 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25745 struct nes_adapter *nesadapter = nesdev->nesadapter;
25746
25747 - atomic_inc(&qps_destroyed);
25748 + atomic_inc_unchecked(&qps_destroyed);
25749
25750 /* Free the control structures */
25751
25752 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c
25753 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25754 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25755 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25756 u32 cm_packets_retrans;
25757 u32 cm_packets_created;
25758 u32 cm_packets_received;
25759 -atomic_t cm_listens_created;
25760 -atomic_t cm_listens_destroyed;
25761 +atomic_unchecked_t cm_listens_created;
25762 +atomic_unchecked_t cm_listens_destroyed;
25763 u32 cm_backlog_drops;
25764 -atomic_t cm_loopbacks;
25765 -atomic_t cm_nodes_created;
25766 -atomic_t cm_nodes_destroyed;
25767 -atomic_t cm_accel_dropped_pkts;
25768 -atomic_t cm_resets_recvd;
25769 +atomic_unchecked_t cm_loopbacks;
25770 +atomic_unchecked_t cm_nodes_created;
25771 +atomic_unchecked_t cm_nodes_destroyed;
25772 +atomic_unchecked_t cm_accel_dropped_pkts;
25773 +atomic_unchecked_t cm_resets_recvd;
25774
25775 static inline int mini_cm_accelerated(struct nes_cm_core *,
25776 struct nes_cm_node *);
25777 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25778
25779 static struct nes_cm_core *g_cm_core;
25780
25781 -atomic_t cm_connects;
25782 -atomic_t cm_accepts;
25783 -atomic_t cm_disconnects;
25784 -atomic_t cm_closes;
25785 -atomic_t cm_connecteds;
25786 -atomic_t cm_connect_reqs;
25787 -atomic_t cm_rejects;
25788 +atomic_unchecked_t cm_connects;
25789 +atomic_unchecked_t cm_accepts;
25790 +atomic_unchecked_t cm_disconnects;
25791 +atomic_unchecked_t cm_closes;
25792 +atomic_unchecked_t cm_connecteds;
25793 +atomic_unchecked_t cm_connect_reqs;
25794 +atomic_unchecked_t cm_rejects;
25795
25796
25797 /**
25798 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25799 kfree(listener);
25800 listener = NULL;
25801 ret = 0;
25802 - atomic_inc(&cm_listens_destroyed);
25803 + atomic_inc_unchecked(&cm_listens_destroyed);
25804 } else {
25805 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25806 }
25807 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25808 cm_node->rem_mac);
25809
25810 add_hte_node(cm_core, cm_node);
25811 - atomic_inc(&cm_nodes_created);
25812 + atomic_inc_unchecked(&cm_nodes_created);
25813
25814 return cm_node;
25815 }
25816 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25817 }
25818
25819 atomic_dec(&cm_core->node_cnt);
25820 - atomic_inc(&cm_nodes_destroyed);
25821 + atomic_inc_unchecked(&cm_nodes_destroyed);
25822 nesqp = cm_node->nesqp;
25823 if (nesqp) {
25824 nesqp->cm_node = NULL;
25825 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25826
25827 static void drop_packet(struct sk_buff *skb)
25828 {
25829 - atomic_inc(&cm_accel_dropped_pkts);
25830 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25831 dev_kfree_skb_any(skb);
25832 }
25833
25834 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25835 {
25836
25837 int reset = 0; /* whether to send reset in case of err.. */
25838 - atomic_inc(&cm_resets_recvd);
25839 + atomic_inc_unchecked(&cm_resets_recvd);
25840 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25841 " refcnt=%d\n", cm_node, cm_node->state,
25842 atomic_read(&cm_node->ref_count));
25843 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25844 rem_ref_cm_node(cm_node->cm_core, cm_node);
25845 return NULL;
25846 }
25847 - atomic_inc(&cm_loopbacks);
25848 + atomic_inc_unchecked(&cm_loopbacks);
25849 loopbackremotenode->loopbackpartner = cm_node;
25850 loopbackremotenode->tcp_cntxt.rcv_wscale =
25851 NES_CM_DEFAULT_RCV_WND_SCALE;
25852 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25853 add_ref_cm_node(cm_node);
25854 } else if (cm_node->state == NES_CM_STATE_TSA) {
25855 rem_ref_cm_node(cm_core, cm_node);
25856 - atomic_inc(&cm_accel_dropped_pkts);
25857 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25858 dev_kfree_skb_any(skb);
25859 break;
25860 }
25861 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25862
25863 if ((cm_id) && (cm_id->event_handler)) {
25864 if (issue_disconn) {
25865 - atomic_inc(&cm_disconnects);
25866 + atomic_inc_unchecked(&cm_disconnects);
25867 cm_event.event = IW_CM_EVENT_DISCONNECT;
25868 cm_event.status = disconn_status;
25869 cm_event.local_addr = cm_id->local_addr;
25870 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25871 }
25872
25873 if (issue_close) {
25874 - atomic_inc(&cm_closes);
25875 + atomic_inc_unchecked(&cm_closes);
25876 nes_disconnect(nesqp, 1);
25877
25878 cm_id->provider_data = nesqp;
25879 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25880
25881 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25882 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25883 - atomic_inc(&cm_accepts);
25884 + atomic_inc_unchecked(&cm_accepts);
25885
25886 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25887 netdev_refcnt_read(nesvnic->netdev));
25888 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25889
25890 struct nes_cm_core *cm_core;
25891
25892 - atomic_inc(&cm_rejects);
25893 + atomic_inc_unchecked(&cm_rejects);
25894 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25895 loopback = cm_node->loopbackpartner;
25896 cm_core = cm_node->cm_core;
25897 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25898 ntohl(cm_id->local_addr.sin_addr.s_addr),
25899 ntohs(cm_id->local_addr.sin_port));
25900
25901 - atomic_inc(&cm_connects);
25902 + atomic_inc_unchecked(&cm_connects);
25903 nesqp->active_conn = 1;
25904
25905 /* cache the cm_id in the qp */
25906 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25907 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
25908 return err;
25909 }
25910 - atomic_inc(&cm_listens_created);
25911 + atomic_inc_unchecked(&cm_listens_created);
25912 }
25913
25914 cm_id->add_ref(cm_id);
25915 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
25916 if (nesqp->destroyed) {
25917 return;
25918 }
25919 - atomic_inc(&cm_connecteds);
25920 + atomic_inc_unchecked(&cm_connecteds);
25921 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
25922 " local port 0x%04X. jiffies = %lu.\n",
25923 nesqp->hwqp.qp_id,
25924 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
25925
25926 cm_id->add_ref(cm_id);
25927 ret = cm_id->event_handler(cm_id, &cm_event);
25928 - atomic_inc(&cm_closes);
25929 + atomic_inc_unchecked(&cm_closes);
25930 cm_event.event = IW_CM_EVENT_CLOSE;
25931 cm_event.status = 0;
25932 cm_event.provider_data = cm_id->provider_data;
25933 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
25934 return;
25935 cm_id = cm_node->cm_id;
25936
25937 - atomic_inc(&cm_connect_reqs);
25938 + atomic_inc_unchecked(&cm_connect_reqs);
25939 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
25940 cm_node, cm_id, jiffies);
25941
25942 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
25943 return;
25944 cm_id = cm_node->cm_id;
25945
25946 - atomic_inc(&cm_connect_reqs);
25947 + atomic_inc_unchecked(&cm_connect_reqs);
25948 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
25949 cm_node, cm_id, jiffies);
25950
25951 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.h linux-3.0.3/drivers/infiniband/hw/nes/nes.h
25952 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
25953 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
25954 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
25955 extern unsigned int wqm_quanta;
25956 extern struct list_head nes_adapter_list;
25957
25958 -extern atomic_t cm_connects;
25959 -extern atomic_t cm_accepts;
25960 -extern atomic_t cm_disconnects;
25961 -extern atomic_t cm_closes;
25962 -extern atomic_t cm_connecteds;
25963 -extern atomic_t cm_connect_reqs;
25964 -extern atomic_t cm_rejects;
25965 -extern atomic_t mod_qp_timouts;
25966 -extern atomic_t qps_created;
25967 -extern atomic_t qps_destroyed;
25968 -extern atomic_t sw_qps_destroyed;
25969 +extern atomic_unchecked_t cm_connects;
25970 +extern atomic_unchecked_t cm_accepts;
25971 +extern atomic_unchecked_t cm_disconnects;
25972 +extern atomic_unchecked_t cm_closes;
25973 +extern atomic_unchecked_t cm_connecteds;
25974 +extern atomic_unchecked_t cm_connect_reqs;
25975 +extern atomic_unchecked_t cm_rejects;
25976 +extern atomic_unchecked_t mod_qp_timouts;
25977 +extern atomic_unchecked_t qps_created;
25978 +extern atomic_unchecked_t qps_destroyed;
25979 +extern atomic_unchecked_t sw_qps_destroyed;
25980 extern u32 mh_detected;
25981 extern u32 mh_pauses_sent;
25982 extern u32 cm_packets_sent;
25983 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
25984 extern u32 cm_packets_received;
25985 extern u32 cm_packets_dropped;
25986 extern u32 cm_packets_retrans;
25987 -extern atomic_t cm_listens_created;
25988 -extern atomic_t cm_listens_destroyed;
25989 +extern atomic_unchecked_t cm_listens_created;
25990 +extern atomic_unchecked_t cm_listens_destroyed;
25991 extern u32 cm_backlog_drops;
25992 -extern atomic_t cm_loopbacks;
25993 -extern atomic_t cm_nodes_created;
25994 -extern atomic_t cm_nodes_destroyed;
25995 -extern atomic_t cm_accel_dropped_pkts;
25996 -extern atomic_t cm_resets_recvd;
25997 +extern atomic_unchecked_t cm_loopbacks;
25998 +extern atomic_unchecked_t cm_nodes_created;
25999 +extern atomic_unchecked_t cm_nodes_destroyed;
26000 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26001 +extern atomic_unchecked_t cm_resets_recvd;
26002
26003 extern u32 int_mod_timer_init;
26004 extern u32 int_mod_cq_depth_256;
26005 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c
26006 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26007 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26008 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26009 target_stat_values[++index] = mh_detected;
26010 target_stat_values[++index] = mh_pauses_sent;
26011 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26012 - target_stat_values[++index] = atomic_read(&cm_connects);
26013 - target_stat_values[++index] = atomic_read(&cm_accepts);
26014 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26015 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26016 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26017 - target_stat_values[++index] = atomic_read(&cm_rejects);
26018 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26019 - target_stat_values[++index] = atomic_read(&qps_created);
26020 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26021 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26022 - target_stat_values[++index] = atomic_read(&cm_closes);
26023 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26024 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26025 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26026 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26027 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26028 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26029 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26030 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26031 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26032 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26033 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26034 target_stat_values[++index] = cm_packets_sent;
26035 target_stat_values[++index] = cm_packets_bounced;
26036 target_stat_values[++index] = cm_packets_created;
26037 target_stat_values[++index] = cm_packets_received;
26038 target_stat_values[++index] = cm_packets_dropped;
26039 target_stat_values[++index] = cm_packets_retrans;
26040 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26041 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26042 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26043 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26044 target_stat_values[++index] = cm_backlog_drops;
26045 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26046 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26047 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26048 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26049 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26050 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26051 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26052 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26053 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26054 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26055 target_stat_values[++index] = nesadapter->free_4kpbl;
26056 target_stat_values[++index] = nesadapter->free_256pbl;
26057 target_stat_values[++index] = int_mod_timer_init;
26058 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c
26059 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26060 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26061 @@ -46,9 +46,9 @@
26062
26063 #include <rdma/ib_umem.h>
26064
26065 -atomic_t mod_qp_timouts;
26066 -atomic_t qps_created;
26067 -atomic_t sw_qps_destroyed;
26068 +atomic_unchecked_t mod_qp_timouts;
26069 +atomic_unchecked_t qps_created;
26070 +atomic_unchecked_t sw_qps_destroyed;
26071
26072 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26073
26074 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26075 if (init_attr->create_flags)
26076 return ERR_PTR(-EINVAL);
26077
26078 - atomic_inc(&qps_created);
26079 + atomic_inc_unchecked(&qps_created);
26080 switch (init_attr->qp_type) {
26081 case IB_QPT_RC:
26082 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26083 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26084 struct iw_cm_event cm_event;
26085 int ret;
26086
26087 - atomic_inc(&sw_qps_destroyed);
26088 + atomic_inc_unchecked(&sw_qps_destroyed);
26089 nesqp->destroyed = 1;
26090
26091 /* Blow away the connection if it exists. */
26092 diff -urNp linux-3.0.3/drivers/infiniband/hw/qib/qib.h linux-3.0.3/drivers/infiniband/hw/qib/qib.h
26093 --- linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26094 +++ linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26095 @@ -51,6 +51,7 @@
26096 #include <linux/completion.h>
26097 #include <linux/kref.h>
26098 #include <linux/sched.h>
26099 +#include <linux/slab.h>
26100
26101 #include "qib_common.h"
26102 #include "qib_verbs.h"
26103 diff -urNp linux-3.0.3/drivers/input/gameport/gameport.c linux-3.0.3/drivers/input/gameport/gameport.c
26104 --- linux-3.0.3/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26105 +++ linux-3.0.3/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26106 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26107 */
26108 static void gameport_init_port(struct gameport *gameport)
26109 {
26110 - static atomic_t gameport_no = ATOMIC_INIT(0);
26111 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26112
26113 __module_get(THIS_MODULE);
26114
26115 mutex_init(&gameport->drv_mutex);
26116 device_initialize(&gameport->dev);
26117 dev_set_name(&gameport->dev, "gameport%lu",
26118 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26119 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26120 gameport->dev.bus = &gameport_bus;
26121 gameport->dev.release = gameport_release_port;
26122 if (gameport->parent)
26123 diff -urNp linux-3.0.3/drivers/input/input.c linux-3.0.3/drivers/input/input.c
26124 --- linux-3.0.3/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26125 +++ linux-3.0.3/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26126 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26127 */
26128 int input_register_device(struct input_dev *dev)
26129 {
26130 - static atomic_t input_no = ATOMIC_INIT(0);
26131 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26132 struct input_handler *handler;
26133 const char *path;
26134 int error;
26135 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26136 dev->setkeycode = input_default_setkeycode;
26137
26138 dev_set_name(&dev->dev, "input%ld",
26139 - (unsigned long) atomic_inc_return(&input_no) - 1);
26140 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26141
26142 error = device_add(&dev->dev);
26143 if (error)
26144 diff -urNp linux-3.0.3/drivers/input/joystick/sidewinder.c linux-3.0.3/drivers/input/joystick/sidewinder.c
26145 --- linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26146 +++ linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26147 @@ -30,6 +30,7 @@
26148 #include <linux/kernel.h>
26149 #include <linux/module.h>
26150 #include <linux/slab.h>
26151 +#include <linux/sched.h>
26152 #include <linux/init.h>
26153 #include <linux/input.h>
26154 #include <linux/gameport.h>
26155 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26156 unsigned char buf[SW_LENGTH];
26157 int i;
26158
26159 + pax_track_stack();
26160 +
26161 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26162
26163 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26164 diff -urNp linux-3.0.3/drivers/input/joystick/xpad.c linux-3.0.3/drivers/input/joystick/xpad.c
26165 --- linux-3.0.3/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26166 +++ linux-3.0.3/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26167 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26168
26169 static int xpad_led_probe(struct usb_xpad *xpad)
26170 {
26171 - static atomic_t led_seq = ATOMIC_INIT(0);
26172 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26173 long led_no;
26174 struct xpad_led *led;
26175 struct led_classdev *led_cdev;
26176 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26177 if (!led)
26178 return -ENOMEM;
26179
26180 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26181 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26182
26183 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26184 led->xpad = xpad;
26185 diff -urNp linux-3.0.3/drivers/input/mousedev.c linux-3.0.3/drivers/input/mousedev.c
26186 --- linux-3.0.3/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26187 +++ linux-3.0.3/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26188 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26189
26190 spin_unlock_irq(&client->packet_lock);
26191
26192 - if (copy_to_user(buffer, data, count))
26193 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26194 return -EFAULT;
26195
26196 return count;
26197 diff -urNp linux-3.0.3/drivers/input/serio/serio.c linux-3.0.3/drivers/input/serio/serio.c
26198 --- linux-3.0.3/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26199 +++ linux-3.0.3/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26200 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26201 */
26202 static void serio_init_port(struct serio *serio)
26203 {
26204 - static atomic_t serio_no = ATOMIC_INIT(0);
26205 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26206
26207 __module_get(THIS_MODULE);
26208
26209 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26210 mutex_init(&serio->drv_mutex);
26211 device_initialize(&serio->dev);
26212 dev_set_name(&serio->dev, "serio%ld",
26213 - (long)atomic_inc_return(&serio_no) - 1);
26214 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26215 serio->dev.bus = &serio_bus;
26216 serio->dev.release = serio_release_port;
26217 serio->dev.groups = serio_device_attr_groups;
26218 diff -urNp linux-3.0.3/drivers/isdn/capi/capi.c linux-3.0.3/drivers/isdn/capi/capi.c
26219 --- linux-3.0.3/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26220 +++ linux-3.0.3/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26221 @@ -83,8 +83,8 @@ struct capiminor {
26222
26223 struct capi20_appl *ap;
26224 u32 ncci;
26225 - atomic_t datahandle;
26226 - atomic_t msgid;
26227 + atomic_unchecked_t datahandle;
26228 + atomic_unchecked_t msgid;
26229
26230 struct tty_port port;
26231 int ttyinstop;
26232 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26233 capimsg_setu16(s, 2, mp->ap->applid);
26234 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26235 capimsg_setu8 (s, 5, CAPI_RESP);
26236 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26237 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26238 capimsg_setu32(s, 8, mp->ncci);
26239 capimsg_setu16(s, 12, datahandle);
26240 }
26241 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26242 mp->outbytes -= len;
26243 spin_unlock_bh(&mp->outlock);
26244
26245 - datahandle = atomic_inc_return(&mp->datahandle);
26246 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26247 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26248 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26249 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26250 capimsg_setu16(skb->data, 2, mp->ap->applid);
26251 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26252 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26253 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26254 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26255 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26256 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26257 capimsg_setu16(skb->data, 16, len); /* Data length */
26258 diff -urNp linux-3.0.3/drivers/isdn/gigaset/common.c linux-3.0.3/drivers/isdn/gigaset/common.c
26259 --- linux-3.0.3/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26260 +++ linux-3.0.3/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26261 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26262 cs->commands_pending = 0;
26263 cs->cur_at_seq = 0;
26264 cs->gotfwver = -1;
26265 - cs->open_count = 0;
26266 + local_set(&cs->open_count, 0);
26267 cs->dev = NULL;
26268 cs->tty = NULL;
26269 cs->tty_dev = NULL;
26270 diff -urNp linux-3.0.3/drivers/isdn/gigaset/gigaset.h linux-3.0.3/drivers/isdn/gigaset/gigaset.h
26271 --- linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26272 +++ linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26273 @@ -35,6 +35,7 @@
26274 #include <linux/tty_driver.h>
26275 #include <linux/list.h>
26276 #include <asm/atomic.h>
26277 +#include <asm/local.h>
26278
26279 #define GIG_VERSION {0, 5, 0, 0}
26280 #define GIG_COMPAT {0, 4, 0, 0}
26281 @@ -433,7 +434,7 @@ struct cardstate {
26282 spinlock_t cmdlock;
26283 unsigned curlen, cmdbytes;
26284
26285 - unsigned open_count;
26286 + local_t open_count;
26287 struct tty_struct *tty;
26288 struct tasklet_struct if_wake_tasklet;
26289 unsigned control_state;
26290 diff -urNp linux-3.0.3/drivers/isdn/gigaset/interface.c linux-3.0.3/drivers/isdn/gigaset/interface.c
26291 --- linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26292 +++ linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26293 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26294 }
26295 tty->driver_data = cs;
26296
26297 - ++cs->open_count;
26298 -
26299 - if (cs->open_count == 1) {
26300 + if (local_inc_return(&cs->open_count) == 1) {
26301 spin_lock_irqsave(&cs->lock, flags);
26302 cs->tty = tty;
26303 spin_unlock_irqrestore(&cs->lock, flags);
26304 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26305
26306 if (!cs->connected)
26307 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26308 - else if (!cs->open_count)
26309 + else if (!local_read(&cs->open_count))
26310 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26311 else {
26312 - if (!--cs->open_count) {
26313 + if (!local_dec_return(&cs->open_count)) {
26314 spin_lock_irqsave(&cs->lock, flags);
26315 cs->tty = NULL;
26316 spin_unlock_irqrestore(&cs->lock, flags);
26317 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26318 if (!cs->connected) {
26319 gig_dbg(DEBUG_IF, "not connected");
26320 retval = -ENODEV;
26321 - } else if (!cs->open_count)
26322 + } else if (!local_read(&cs->open_count))
26323 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26324 else {
26325 retval = 0;
26326 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26327 retval = -ENODEV;
26328 goto done;
26329 }
26330 - if (!cs->open_count) {
26331 + if (!local_read(&cs->open_count)) {
26332 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26333 retval = -ENODEV;
26334 goto done;
26335 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26336 if (!cs->connected) {
26337 gig_dbg(DEBUG_IF, "not connected");
26338 retval = -ENODEV;
26339 - } else if (!cs->open_count)
26340 + } else if (!local_read(&cs->open_count))
26341 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26342 else if (cs->mstate != MS_LOCKED) {
26343 dev_warn(cs->dev, "can't write to unlocked device\n");
26344 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26345
26346 if (!cs->connected)
26347 gig_dbg(DEBUG_IF, "not connected");
26348 - else if (!cs->open_count)
26349 + else if (!local_read(&cs->open_count))
26350 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26351 else if (cs->mstate != MS_LOCKED)
26352 dev_warn(cs->dev, "can't write to unlocked device\n");
26353 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26354
26355 if (!cs->connected)
26356 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26357 - else if (!cs->open_count)
26358 + else if (!local_read(&cs->open_count))
26359 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26360 else
26361 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26362 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26363
26364 if (!cs->connected)
26365 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26366 - else if (!cs->open_count)
26367 + else if (!local_read(&cs->open_count))
26368 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26369 else
26370 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26371 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26372 goto out;
26373 }
26374
26375 - if (!cs->open_count) {
26376 + if (!local_read(&cs->open_count)) {
26377 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26378 goto out;
26379 }
26380 diff -urNp linux-3.0.3/drivers/isdn/hardware/avm/b1.c linux-3.0.3/drivers/isdn/hardware/avm/b1.c
26381 --- linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26382 +++ linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26383 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26384 }
26385 if (left) {
26386 if (t4file->user) {
26387 - if (copy_from_user(buf, dp, left))
26388 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26389 return -EFAULT;
26390 } else {
26391 memcpy(buf, dp, left);
26392 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26393 }
26394 if (left) {
26395 if (config->user) {
26396 - if (copy_from_user(buf, dp, left))
26397 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26398 return -EFAULT;
26399 } else {
26400 memcpy(buf, dp, left);
26401 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c
26402 --- linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26403 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26404 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26405 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26406 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26407
26408 + pax_track_stack();
26409
26410 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26411 {
26412 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c
26413 --- linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26414 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26415 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26416 IDI_SYNC_REQ req;
26417 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26418
26419 + pax_track_stack();
26420 +
26421 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26422
26423 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26424 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c
26425 --- linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26426 +++ linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26427 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26428 IDI_SYNC_REQ req;
26429 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26430
26431 + pax_track_stack();
26432 +
26433 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26434
26435 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26436 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c
26437 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26438 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26439 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26440 IDI_SYNC_REQ req;
26441 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26442
26443 + pax_track_stack();
26444 +
26445 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26446
26447 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26448 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h
26449 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26450 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26451 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26452 } diva_didd_add_adapter_t;
26453 typedef struct _diva_didd_remove_adapter {
26454 IDI_CALL p_request;
26455 -} diva_didd_remove_adapter_t;
26456 +} __no_const diva_didd_remove_adapter_t;
26457 typedef struct _diva_didd_read_adapter_array {
26458 void * buffer;
26459 dword length;
26460 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c
26461 --- linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26462 +++ linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26463 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26464 IDI_SYNC_REQ req;
26465 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26466
26467 + pax_track_stack();
26468 +
26469 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26470
26471 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26472 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/message.c linux-3.0.3/drivers/isdn/hardware/eicon/message.c
26473 --- linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26474 +++ linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26475 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26476 dword d;
26477 word w;
26478
26479 + pax_track_stack();
26480 +
26481 a = plci->adapter;
26482 Id = ((word)plci->Id<<8)|a->Id;
26483 PUT_WORD(&SS_Ind[4],0x0000);
26484 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26485 word j, n, w;
26486 dword d;
26487
26488 + pax_track_stack();
26489 +
26490
26491 for(i=0;i<8;i++) bp_parms[i].length = 0;
26492 for(i=0;i<2;i++) global_config[i].length = 0;
26493 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26494 const byte llc3[] = {4,3,2,2,6,6,0};
26495 const byte header[] = {0,2,3,3,0,0,0};
26496
26497 + pax_track_stack();
26498 +
26499 for(i=0;i<8;i++) bp_parms[i].length = 0;
26500 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26501 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26502 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26503 word appl_number_group_type[MAX_APPL];
26504 PLCI *auxplci;
26505
26506 + pax_track_stack();
26507 +
26508 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26509
26510 if(!a->group_optimization_enabled)
26511 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c
26512 --- linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26513 +++ linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26514 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26515 IDI_SYNC_REQ req;
26516 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26517
26518 + pax_track_stack();
26519 +
26520 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26521
26522 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26523 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h
26524 --- linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26525 +++ linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26526 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26527 typedef struct _diva_os_idi_adapter_interface {
26528 diva_init_card_proc_t cleanup_adapter_proc;
26529 diva_cmd_card_proc_t cmd_proc;
26530 -} diva_os_idi_adapter_interface_t;
26531 +} __no_const diva_os_idi_adapter_interface_t;
26532
26533 typedef struct _diva_os_xdi_adapter {
26534 struct list_head link;
26535 diff -urNp linux-3.0.3/drivers/isdn/i4l/isdn_common.c linux-3.0.3/drivers/isdn/i4l/isdn_common.c
26536 --- linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26537 +++ linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26538 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26539 } iocpar;
26540 void __user *argp = (void __user *)arg;
26541
26542 + pax_track_stack();
26543 +
26544 #define name iocpar.name
26545 #define bname iocpar.bname
26546 #define iocts iocpar.iocts
26547 diff -urNp linux-3.0.3/drivers/isdn/icn/icn.c linux-3.0.3/drivers/isdn/icn/icn.c
26548 --- linux-3.0.3/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26549 +++ linux-3.0.3/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26550 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26551 if (count > len)
26552 count = len;
26553 if (user) {
26554 - if (copy_from_user(msg, buf, count))
26555 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26556 return -EFAULT;
26557 } else
26558 memcpy(msg, buf, count);
26559 diff -urNp linux-3.0.3/drivers/lguest/core.c linux-3.0.3/drivers/lguest/core.c
26560 --- linux-3.0.3/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26561 +++ linux-3.0.3/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26562 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26563 * it's worked so far. The end address needs +1 because __get_vm_area
26564 * allocates an extra guard page, so we need space for that.
26565 */
26566 +
26567 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26568 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26569 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26570 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26571 +#else
26572 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26573 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26574 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26575 +#endif
26576 +
26577 if (!switcher_vma) {
26578 err = -ENOMEM;
26579 printk("lguest: could not map switcher pages high\n");
26580 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26581 * Now the Switcher is mapped at the right address, we can't fail!
26582 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26583 */
26584 - memcpy(switcher_vma->addr, start_switcher_text,
26585 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26586 end_switcher_text - start_switcher_text);
26587
26588 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26589 diff -urNp linux-3.0.3/drivers/lguest/x86/core.c linux-3.0.3/drivers/lguest/x86/core.c
26590 --- linux-3.0.3/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26591 +++ linux-3.0.3/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26592 @@ -59,7 +59,7 @@ static struct {
26593 /* Offset from where switcher.S was compiled to where we've copied it */
26594 static unsigned long switcher_offset(void)
26595 {
26596 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26597 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26598 }
26599
26600 /* This cpu's struct lguest_pages. */
26601 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26602 * These copies are pretty cheap, so we do them unconditionally: */
26603 /* Save the current Host top-level page directory.
26604 */
26605 +
26606 +#ifdef CONFIG_PAX_PER_CPU_PGD
26607 + pages->state.host_cr3 = read_cr3();
26608 +#else
26609 pages->state.host_cr3 = __pa(current->mm->pgd);
26610 +#endif
26611 +
26612 /*
26613 * Set up the Guest's page tables to see this CPU's pages (and no
26614 * other CPU's pages).
26615 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26616 * compiled-in switcher code and the high-mapped copy we just made.
26617 */
26618 for (i = 0; i < IDT_ENTRIES; i++)
26619 - default_idt_entries[i] += switcher_offset();
26620 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26621
26622 /*
26623 * Set up the Switcher's per-cpu areas.
26624 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26625 * it will be undisturbed when we switch. To change %cs and jump we
26626 * need this structure to feed to Intel's "lcall" instruction.
26627 */
26628 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26629 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26630 lguest_entry.segment = LGUEST_CS;
26631
26632 /*
26633 diff -urNp linux-3.0.3/drivers/lguest/x86/switcher_32.S linux-3.0.3/drivers/lguest/x86/switcher_32.S
26634 --- linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26635 +++ linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26636 @@ -87,6 +87,7 @@
26637 #include <asm/page.h>
26638 #include <asm/segment.h>
26639 #include <asm/lguest.h>
26640 +#include <asm/processor-flags.h>
26641
26642 // We mark the start of the code to copy
26643 // It's placed in .text tho it's never run here
26644 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26645 // Changes type when we load it: damn Intel!
26646 // For after we switch over our page tables
26647 // That entry will be read-only: we'd crash.
26648 +
26649 +#ifdef CONFIG_PAX_KERNEXEC
26650 + mov %cr0, %edx
26651 + xor $X86_CR0_WP, %edx
26652 + mov %edx, %cr0
26653 +#endif
26654 +
26655 movl $(GDT_ENTRY_TSS*8), %edx
26656 ltr %dx
26657
26658 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26659 // Let's clear it again for our return.
26660 // The GDT descriptor of the Host
26661 // Points to the table after two "size" bytes
26662 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26663 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26664 // Clear "used" from type field (byte 5, bit 2)
26665 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26666 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26667 +
26668 +#ifdef CONFIG_PAX_KERNEXEC
26669 + mov %cr0, %eax
26670 + xor $X86_CR0_WP, %eax
26671 + mov %eax, %cr0
26672 +#endif
26673
26674 // Once our page table's switched, the Guest is live!
26675 // The Host fades as we run this final step.
26676 @@ -295,13 +309,12 @@ deliver_to_host:
26677 // I consulted gcc, and it gave
26678 // These instructions, which I gladly credit:
26679 leal (%edx,%ebx,8), %eax
26680 - movzwl (%eax),%edx
26681 - movl 4(%eax), %eax
26682 - xorw %ax, %ax
26683 - orl %eax, %edx
26684 + movl 4(%eax), %edx
26685 + movw (%eax), %dx
26686 // Now the address of the handler's in %edx
26687 // We call it now: its "iret" drops us home.
26688 - jmp *%edx
26689 + ljmp $__KERNEL_CS, $1f
26690 +1: jmp *%edx
26691
26692 // Every interrupt can come to us here
26693 // But we must truly tell each apart.
26694 diff -urNp linux-3.0.3/drivers/md/dm.c linux-3.0.3/drivers/md/dm.c
26695 --- linux-3.0.3/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26696 +++ linux-3.0.3/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26697 @@ -164,9 +164,9 @@ struct mapped_device {
26698 /*
26699 * Event handling.
26700 */
26701 - atomic_t event_nr;
26702 + atomic_unchecked_t event_nr;
26703 wait_queue_head_t eventq;
26704 - atomic_t uevent_seq;
26705 + atomic_unchecked_t uevent_seq;
26706 struct list_head uevent_list;
26707 spinlock_t uevent_lock; /* Protect access to uevent_list */
26708
26709 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26710 rwlock_init(&md->map_lock);
26711 atomic_set(&md->holders, 1);
26712 atomic_set(&md->open_count, 0);
26713 - atomic_set(&md->event_nr, 0);
26714 - atomic_set(&md->uevent_seq, 0);
26715 + atomic_set_unchecked(&md->event_nr, 0);
26716 + atomic_set_unchecked(&md->uevent_seq, 0);
26717 INIT_LIST_HEAD(&md->uevent_list);
26718 spin_lock_init(&md->uevent_lock);
26719
26720 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26721
26722 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26723
26724 - atomic_inc(&md->event_nr);
26725 + atomic_inc_unchecked(&md->event_nr);
26726 wake_up(&md->eventq);
26727 }
26728
26729 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26730
26731 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26732 {
26733 - return atomic_add_return(1, &md->uevent_seq);
26734 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26735 }
26736
26737 uint32_t dm_get_event_nr(struct mapped_device *md)
26738 {
26739 - return atomic_read(&md->event_nr);
26740 + return atomic_read_unchecked(&md->event_nr);
26741 }
26742
26743 int dm_wait_event(struct mapped_device *md, int event_nr)
26744 {
26745 return wait_event_interruptible(md->eventq,
26746 - (event_nr != atomic_read(&md->event_nr)));
26747 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26748 }
26749
26750 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26751 diff -urNp linux-3.0.3/drivers/md/dm-ioctl.c linux-3.0.3/drivers/md/dm-ioctl.c
26752 --- linux-3.0.3/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26753 +++ linux-3.0.3/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26754 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26755 cmd == DM_LIST_VERSIONS_CMD)
26756 return 0;
26757
26758 - if ((cmd == DM_DEV_CREATE_CMD)) {
26759 + if (cmd == DM_DEV_CREATE_CMD) {
26760 if (!*param->name) {
26761 DMWARN("name not supplied when creating device");
26762 return -EINVAL;
26763 diff -urNp linux-3.0.3/drivers/md/dm-raid1.c linux-3.0.3/drivers/md/dm-raid1.c
26764 --- linux-3.0.3/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26765 +++ linux-3.0.3/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26766 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26767
26768 struct mirror {
26769 struct mirror_set *ms;
26770 - atomic_t error_count;
26771 + atomic_unchecked_t error_count;
26772 unsigned long error_type;
26773 struct dm_dev *dev;
26774 sector_t offset;
26775 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26776 struct mirror *m;
26777
26778 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26779 - if (!atomic_read(&m->error_count))
26780 + if (!atomic_read_unchecked(&m->error_count))
26781 return m;
26782
26783 return NULL;
26784 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26785 * simple way to tell if a device has encountered
26786 * errors.
26787 */
26788 - atomic_inc(&m->error_count);
26789 + atomic_inc_unchecked(&m->error_count);
26790
26791 if (test_and_set_bit(error_type, &m->error_type))
26792 return;
26793 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26794 struct mirror *m = get_default_mirror(ms);
26795
26796 do {
26797 - if (likely(!atomic_read(&m->error_count)))
26798 + if (likely(!atomic_read_unchecked(&m->error_count)))
26799 return m;
26800
26801 if (m-- == ms->mirror)
26802 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26803 {
26804 struct mirror *default_mirror = get_default_mirror(m->ms);
26805
26806 - return !atomic_read(&default_mirror->error_count);
26807 + return !atomic_read_unchecked(&default_mirror->error_count);
26808 }
26809
26810 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26811 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26812 */
26813 if (likely(region_in_sync(ms, region, 1)))
26814 m = choose_mirror(ms, bio->bi_sector);
26815 - else if (m && atomic_read(&m->error_count))
26816 + else if (m && atomic_read_unchecked(&m->error_count))
26817 m = NULL;
26818
26819 if (likely(m))
26820 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26821 }
26822
26823 ms->mirror[mirror].ms = ms;
26824 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26825 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26826 ms->mirror[mirror].error_type = 0;
26827 ms->mirror[mirror].offset = offset;
26828
26829 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26830 */
26831 static char device_status_char(struct mirror *m)
26832 {
26833 - if (!atomic_read(&(m->error_count)))
26834 + if (!atomic_read_unchecked(&(m->error_count)))
26835 return 'A';
26836
26837 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26838 diff -urNp linux-3.0.3/drivers/md/dm-stripe.c linux-3.0.3/drivers/md/dm-stripe.c
26839 --- linux-3.0.3/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26840 +++ linux-3.0.3/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26841 @@ -20,7 +20,7 @@ struct stripe {
26842 struct dm_dev *dev;
26843 sector_t physical_start;
26844
26845 - atomic_t error_count;
26846 + atomic_unchecked_t error_count;
26847 };
26848
26849 struct stripe_c {
26850 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26851 kfree(sc);
26852 return r;
26853 }
26854 - atomic_set(&(sc->stripe[i].error_count), 0);
26855 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26856 }
26857
26858 ti->private = sc;
26859 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26860 DMEMIT("%d ", sc->stripes);
26861 for (i = 0; i < sc->stripes; i++) {
26862 DMEMIT("%s ", sc->stripe[i].dev->name);
26863 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26864 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26865 'D' : 'A';
26866 }
26867 buffer[i] = '\0';
26868 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26869 */
26870 for (i = 0; i < sc->stripes; i++)
26871 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26872 - atomic_inc(&(sc->stripe[i].error_count));
26873 - if (atomic_read(&(sc->stripe[i].error_count)) <
26874 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
26875 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26876 DM_IO_ERROR_THRESHOLD)
26877 schedule_work(&sc->trigger_event);
26878 }
26879 diff -urNp linux-3.0.3/drivers/md/dm-table.c linux-3.0.3/drivers/md/dm-table.c
26880 --- linux-3.0.3/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26881 +++ linux-3.0.3/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26882 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26883 if (!dev_size)
26884 return 0;
26885
26886 - if ((start >= dev_size) || (start + len > dev_size)) {
26887 + if ((start >= dev_size) || (len > dev_size - start)) {
26888 DMWARN("%s: %s too small for target: "
26889 "start=%llu, len=%llu, dev_size=%llu",
26890 dm_device_name(ti->table->md), bdevname(bdev, b),
26891 diff -urNp linux-3.0.3/drivers/md/md.c linux-3.0.3/drivers/md/md.c
26892 --- linux-3.0.3/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26893 +++ linux-3.0.3/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26894 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26895 * start build, activate spare
26896 */
26897 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26898 -static atomic_t md_event_count;
26899 +static atomic_unchecked_t md_event_count;
26900 void md_new_event(mddev_t *mddev)
26901 {
26902 - atomic_inc(&md_event_count);
26903 + atomic_inc_unchecked(&md_event_count);
26904 wake_up(&md_event_waiters);
26905 }
26906 EXPORT_SYMBOL_GPL(md_new_event);
26907 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
26908 */
26909 static void md_new_event_inintr(mddev_t *mddev)
26910 {
26911 - atomic_inc(&md_event_count);
26912 + atomic_inc_unchecked(&md_event_count);
26913 wake_up(&md_event_waiters);
26914 }
26915
26916 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
26917
26918 rdev->preferred_minor = 0xffff;
26919 rdev->data_offset = le64_to_cpu(sb->data_offset);
26920 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
26921 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
26922
26923 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
26924 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
26925 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
26926 else
26927 sb->resync_offset = cpu_to_le64(0);
26928
26929 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
26930 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
26931
26932 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
26933 sb->size = cpu_to_le64(mddev->dev_sectors);
26934 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
26935 static ssize_t
26936 errors_show(mdk_rdev_t *rdev, char *page)
26937 {
26938 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
26939 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
26940 }
26941
26942 static ssize_t
26943 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
26944 char *e;
26945 unsigned long n = simple_strtoul(buf, &e, 10);
26946 if (*buf && (*e == 0 || *e == '\n')) {
26947 - atomic_set(&rdev->corrected_errors, n);
26948 + atomic_set_unchecked(&rdev->corrected_errors, n);
26949 return len;
26950 }
26951 return -EINVAL;
26952 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
26953 rdev->last_read_error.tv_sec = 0;
26954 rdev->last_read_error.tv_nsec = 0;
26955 atomic_set(&rdev->nr_pending, 0);
26956 - atomic_set(&rdev->read_errors, 0);
26957 - atomic_set(&rdev->corrected_errors, 0);
26958 + atomic_set_unchecked(&rdev->read_errors, 0);
26959 + atomic_set_unchecked(&rdev->corrected_errors, 0);
26960
26961 INIT_LIST_HEAD(&rdev->same_set);
26962 init_waitqueue_head(&rdev->blocked_wait);
26963 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
26964
26965 spin_unlock(&pers_lock);
26966 seq_printf(seq, "\n");
26967 - mi->event = atomic_read(&md_event_count);
26968 + mi->event = atomic_read_unchecked(&md_event_count);
26969 return 0;
26970 }
26971 if (v == (void*)2) {
26972 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
26973 chunk_kb ? "KB" : "B");
26974 if (bitmap->file) {
26975 seq_printf(seq, ", file: ");
26976 - seq_path(seq, &bitmap->file->f_path, " \t\n");
26977 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
26978 }
26979
26980 seq_printf(seq, "\n");
26981 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
26982 else {
26983 struct seq_file *p = file->private_data;
26984 p->private = mi;
26985 - mi->event = atomic_read(&md_event_count);
26986 + mi->event = atomic_read_unchecked(&md_event_count);
26987 }
26988 return error;
26989 }
26990 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
26991 /* always allow read */
26992 mask = POLLIN | POLLRDNORM;
26993
26994 - if (mi->event != atomic_read(&md_event_count))
26995 + if (mi->event != atomic_read_unchecked(&md_event_count))
26996 mask |= POLLERR | POLLPRI;
26997 return mask;
26998 }
26999 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27000 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27001 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27002 (int)part_stat_read(&disk->part0, sectors[1]) -
27003 - atomic_read(&disk->sync_io);
27004 + atomic_read_unchecked(&disk->sync_io);
27005 /* sync IO will cause sync_io to increase before the disk_stats
27006 * as sync_io is counted when a request starts, and
27007 * disk_stats is counted when it completes.
27008 diff -urNp linux-3.0.3/drivers/md/md.h linux-3.0.3/drivers/md/md.h
27009 --- linux-3.0.3/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27010 +++ linux-3.0.3/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27011 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27012 * only maintained for arrays that
27013 * support hot removal
27014 */
27015 - atomic_t read_errors; /* number of consecutive read errors that
27016 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27017 * we have tried to ignore.
27018 */
27019 struct timespec last_read_error; /* monotonic time since our
27020 * last read error
27021 */
27022 - atomic_t corrected_errors; /* number of corrected read errors,
27023 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27024 * for reporting to userspace and storing
27025 * in superblock.
27026 */
27027 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27028
27029 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27030 {
27031 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27032 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27033 }
27034
27035 struct mdk_personality
27036 diff -urNp linux-3.0.3/drivers/md/raid10.c linux-3.0.3/drivers/md/raid10.c
27037 --- linux-3.0.3/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27038 +++ linux-3.0.3/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27039 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27040 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27041 set_bit(R10BIO_Uptodate, &r10_bio->state);
27042 else {
27043 - atomic_add(r10_bio->sectors,
27044 + atomic_add_unchecked(r10_bio->sectors,
27045 &conf->mirrors[d].rdev->corrected_errors);
27046 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27047 md_error(r10_bio->mddev,
27048 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27049 {
27050 struct timespec cur_time_mon;
27051 unsigned long hours_since_last;
27052 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27053 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27054
27055 ktime_get_ts(&cur_time_mon);
27056
27057 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27058 * overflowing the shift of read_errors by hours_since_last.
27059 */
27060 if (hours_since_last >= 8 * sizeof(read_errors))
27061 - atomic_set(&rdev->read_errors, 0);
27062 + atomic_set_unchecked(&rdev->read_errors, 0);
27063 else
27064 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27065 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27066 }
27067
27068 /*
27069 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27070 return;
27071
27072 check_decay_read_errors(mddev, rdev);
27073 - atomic_inc(&rdev->read_errors);
27074 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27075 + atomic_inc_unchecked(&rdev->read_errors);
27076 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27077 char b[BDEVNAME_SIZE];
27078 bdevname(rdev->bdev, b);
27079
27080 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27081 "md/raid10:%s: %s: Raid device exceeded "
27082 "read_error threshold [cur %d:max %d]\n",
27083 mdname(mddev), b,
27084 - atomic_read(&rdev->read_errors), max_read_errors);
27085 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27086 printk(KERN_NOTICE
27087 "md/raid10:%s: %s: Failing raid device\n",
27088 mdname(mddev), b);
27089 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27090 test_bit(In_sync, &rdev->flags)) {
27091 atomic_inc(&rdev->nr_pending);
27092 rcu_read_unlock();
27093 - atomic_add(s, &rdev->corrected_errors);
27094 + atomic_add_unchecked(s, &rdev->corrected_errors);
27095 if (sync_page_io(rdev,
27096 r10_bio->devs[sl].addr +
27097 sect,
27098 diff -urNp linux-3.0.3/drivers/md/raid1.c linux-3.0.3/drivers/md/raid1.c
27099 --- linux-3.0.3/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27100 +++ linux-3.0.3/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27101 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27102 rdev_dec_pending(rdev, mddev);
27103 md_error(mddev, rdev);
27104 } else
27105 - atomic_add(s, &rdev->corrected_errors);
27106 + atomic_add_unchecked(s, &rdev->corrected_errors);
27107 }
27108 d = start;
27109 while (d != r1_bio->read_disk) {
27110 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27111 /* Well, this device is dead */
27112 md_error(mddev, rdev);
27113 else {
27114 - atomic_add(s, &rdev->corrected_errors);
27115 + atomic_add_unchecked(s, &rdev->corrected_errors);
27116 printk(KERN_INFO
27117 "md/raid1:%s: read error corrected "
27118 "(%d sectors at %llu on %s)\n",
27119 diff -urNp linux-3.0.3/drivers/md/raid5.c linux-3.0.3/drivers/md/raid5.c
27120 --- linux-3.0.3/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27121 +++ linux-3.0.3/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27122 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27123 bi->bi_next = NULL;
27124 if ((rw & WRITE) &&
27125 test_bit(R5_ReWrite, &sh->dev[i].flags))
27126 - atomic_add(STRIPE_SECTORS,
27127 + atomic_add_unchecked(STRIPE_SECTORS,
27128 &rdev->corrected_errors);
27129 generic_make_request(bi);
27130 } else {
27131 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27132 clear_bit(R5_ReadError, &sh->dev[i].flags);
27133 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27134 }
27135 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27136 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27137 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27138 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27139 } else {
27140 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27141 int retry = 0;
27142 rdev = conf->disks[i].rdev;
27143
27144 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27145 - atomic_inc(&rdev->read_errors);
27146 + atomic_inc_unchecked(&rdev->read_errors);
27147 if (conf->mddev->degraded >= conf->max_degraded)
27148 printk_rl(KERN_WARNING
27149 "md/raid:%s: read error not correctable "
27150 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27151 (unsigned long long)(sh->sector
27152 + rdev->data_offset),
27153 bdn);
27154 - else if (atomic_read(&rdev->read_errors)
27155 + else if (atomic_read_unchecked(&rdev->read_errors)
27156 > conf->max_nr_stripes)
27157 printk(KERN_WARNING
27158 "md/raid:%s: Too many read errors, failing device %s.\n",
27159 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27160 sector_t r_sector;
27161 struct stripe_head sh2;
27162
27163 + pax_track_stack();
27164
27165 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27166 stripe = new_sector;
27167 diff -urNp linux-3.0.3/drivers/media/common/saa7146_hlp.c linux-3.0.3/drivers/media/common/saa7146_hlp.c
27168 --- linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27169 +++ linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27170 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27171
27172 int x[32], y[32], w[32], h[32];
27173
27174 + pax_track_stack();
27175 +
27176 /* clear out memory */
27177 memset(&line_list[0], 0x00, sizeof(u32)*32);
27178 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27179 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27180 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27181 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27182 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27183 u8 buf[HOST_LINK_BUF_SIZE];
27184 int i;
27185
27186 + pax_track_stack();
27187 +
27188 dprintk("%s\n", __func__);
27189
27190 /* check if we have space for a link buf in the rx_buffer */
27191 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27192 unsigned long timeout;
27193 int written;
27194
27195 + pax_track_stack();
27196 +
27197 dprintk("%s\n", __func__);
27198
27199 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27200 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c
27201 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27202 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27203 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27204 if (!buf)
27205 return -ENOMEM;
27206
27207 + pax_track_stack();
27208 +
27209 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27210 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27211 hx.addr, hx.len, hx.chk);
27212 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c
27213 --- linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27214 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27215 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27216 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27217 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27218
27219 + pax_track_stack();
27220
27221 data[0] = 0x8a;
27222 len_in = 1;
27223 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27224 int ret = 0, len_in;
27225 u8 data[512] = {0};
27226
27227 + pax_track_stack();
27228 +
27229 data[0] = 0x0a;
27230 len_in = 1;
27231 info("FRM Firmware Cold Reset");
27232 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c
27233 --- linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27234 +++ linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27235 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27236 int ret = -1;
27237 int sync;
27238
27239 + pax_track_stack();
27240 +
27241 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27242
27243 fcp = 3000;
27244 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/or51211.c linux-3.0.3/drivers/media/dvb/frontends/or51211.c
27245 --- linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27246 +++ linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27247 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27248 u8 tudata[585];
27249 int i;
27250
27251 + pax_track_stack();
27252 +
27253 dprintk("Firmware is %zd bytes\n",fw->size);
27254
27255 /* Get eprom data */
27256 diff -urNp linux-3.0.3/drivers/media/video/cx18/cx18-driver.c linux-3.0.3/drivers/media/video/cx18/cx18-driver.c
27257 --- linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27258 +++ linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27259 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27260 struct i2c_client c;
27261 u8 eedata[256];
27262
27263 + pax_track_stack();
27264 +
27265 memset(&c, 0, sizeof(c));
27266 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27267 c.adapter = &cx->i2c_adap[0];
27268 diff -urNp linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c
27269 --- linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27270 +++ linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27271 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27272 bool handle = false;
27273 struct ir_raw_event ir_core_event[64];
27274
27275 + pax_track_stack();
27276 +
27277 do {
27278 num = 0;
27279 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27280 diff -urNp linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27281 --- linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27282 +++ linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27283 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27284 u8 *eeprom;
27285 struct tveeprom tvdata;
27286
27287 + pax_track_stack();
27288 +
27289 memset(&tvdata,0,sizeof(tvdata));
27290
27291 eeprom = pvr2_eeprom_fetch(hdw);
27292 diff -urNp linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c
27293 --- linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27294 +++ linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27295 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27296 unsigned char localPAT[256];
27297 unsigned char localPMT[256];
27298
27299 + pax_track_stack();
27300 +
27301 /* Set video format - must be done first as it resets other settings */
27302 set_reg8(client, 0x41, h->video_format);
27303
27304 diff -urNp linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c
27305 --- linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27306 +++ linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27307 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27308 u8 tmp[512];
27309 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27310
27311 + pax_track_stack();
27312 +
27313 /* While any outstand message on the bus exists... */
27314 do {
27315
27316 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27317 u8 tmp[512];
27318 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27319
27320 + pax_track_stack();
27321 +
27322 while (loop) {
27323
27324 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27325 diff -urNp linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c
27326 --- linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27327 +++ linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27328 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27329 unsigned char rv, gv, bv;
27330 static unsigned char *Y, *U, *V;
27331
27332 + pax_track_stack();
27333 +
27334 frame = usbvision->cur_frame;
27335 image_size = frame->frmwidth * frame->frmheight;
27336 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27337 diff -urNp linux-3.0.3/drivers/media/video/videobuf-dma-sg.c linux-3.0.3/drivers/media/video/videobuf-dma-sg.c
27338 --- linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27339 +++ linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27340 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27341 {
27342 struct videobuf_queue q;
27343
27344 + pax_track_stack();
27345 +
27346 /* Required to make generic handler to call __videobuf_alloc */
27347 q.int_ops = &sg_ops;
27348
27349 diff -urNp linux-3.0.3/drivers/message/fusion/mptbase.c linux-3.0.3/drivers/message/fusion/mptbase.c
27350 --- linux-3.0.3/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27351 +++ linux-3.0.3/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27352 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27353 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27354 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27355
27356 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27357 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27358 +#else
27359 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27360 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27361 +#endif
27362 +
27363 /*
27364 * Rounding UP to nearest 4-kB boundary here...
27365 */
27366 diff -urNp linux-3.0.3/drivers/message/fusion/mptsas.c linux-3.0.3/drivers/message/fusion/mptsas.c
27367 --- linux-3.0.3/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27368 +++ linux-3.0.3/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27369 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27370 return 0;
27371 }
27372
27373 +static inline void
27374 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27375 +{
27376 + if (phy_info->port_details) {
27377 + phy_info->port_details->rphy = rphy;
27378 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27379 + ioc->name, rphy));
27380 + }
27381 +
27382 + if (rphy) {
27383 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27384 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27385 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27386 + ioc->name, rphy, rphy->dev.release));
27387 + }
27388 +}
27389 +
27390 /* no mutex */
27391 static void
27392 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27393 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27394 return NULL;
27395 }
27396
27397 -static inline void
27398 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27399 -{
27400 - if (phy_info->port_details) {
27401 - phy_info->port_details->rphy = rphy;
27402 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27403 - ioc->name, rphy));
27404 - }
27405 -
27406 - if (rphy) {
27407 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27408 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27409 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27410 - ioc->name, rphy, rphy->dev.release));
27411 - }
27412 -}
27413 -
27414 static inline struct sas_port *
27415 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27416 {
27417 diff -urNp linux-3.0.3/drivers/message/fusion/mptscsih.c linux-3.0.3/drivers/message/fusion/mptscsih.c
27418 --- linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27419 +++ linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27420 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27421
27422 h = shost_priv(SChost);
27423
27424 - if (h) {
27425 - if (h->info_kbuf == NULL)
27426 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27427 - return h->info_kbuf;
27428 - h->info_kbuf[0] = '\0';
27429 + if (!h)
27430 + return NULL;
27431
27432 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27433 - h->info_kbuf[size-1] = '\0';
27434 - }
27435 + if (h->info_kbuf == NULL)
27436 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27437 + return h->info_kbuf;
27438 + h->info_kbuf[0] = '\0';
27439 +
27440 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27441 + h->info_kbuf[size-1] = '\0';
27442
27443 return h->info_kbuf;
27444 }
27445 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_config.c linux-3.0.3/drivers/message/i2o/i2o_config.c
27446 --- linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27447 +++ linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27448 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27449 struct i2o_message *msg;
27450 unsigned int iop;
27451
27452 + pax_track_stack();
27453 +
27454 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27455 return -EFAULT;
27456
27457 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_proc.c linux-3.0.3/drivers/message/i2o/i2o_proc.c
27458 --- linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27459 +++ linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27460 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27461 "Array Controller Device"
27462 };
27463
27464 -static char *chtostr(u8 * chars, int n)
27465 -{
27466 - char tmp[256];
27467 - tmp[0] = 0;
27468 - return strncat(tmp, (char *)chars, n);
27469 -}
27470 -
27471 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27472 char *group)
27473 {
27474 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27475
27476 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27477 seq_printf(seq, "%-#8x", ddm_table.module_id);
27478 - seq_printf(seq, "%-29s",
27479 - chtostr(ddm_table.module_name_version, 28));
27480 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27481 seq_printf(seq, "%9d ", ddm_table.data_size);
27482 seq_printf(seq, "%8d", ddm_table.code_size);
27483
27484 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27485
27486 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27487 seq_printf(seq, "%-#8x", dst->module_id);
27488 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27489 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27490 + seq_printf(seq, "%-.28s", dst->module_name_version);
27491 + seq_printf(seq, "%-.8s", dst->date);
27492 seq_printf(seq, "%8d ", dst->module_size);
27493 seq_printf(seq, "%8d ", dst->mpb_size);
27494 seq_printf(seq, "0x%04x", dst->module_flags);
27495 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27496 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27497 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27498 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27499 - seq_printf(seq, "Vendor info : %s\n",
27500 - chtostr((u8 *) (work32 + 2), 16));
27501 - seq_printf(seq, "Product info : %s\n",
27502 - chtostr((u8 *) (work32 + 6), 16));
27503 - seq_printf(seq, "Description : %s\n",
27504 - chtostr((u8 *) (work32 + 10), 16));
27505 - seq_printf(seq, "Product rev. : %s\n",
27506 - chtostr((u8 *) (work32 + 14), 8));
27507 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27508 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27509 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27510 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27511
27512 seq_printf(seq, "Serial number : ");
27513 print_serial_number(seq, (u8 *) (work32 + 16),
27514 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27515 }
27516
27517 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27518 - seq_printf(seq, "Module name : %s\n",
27519 - chtostr(result.module_name, 24));
27520 - seq_printf(seq, "Module revision : %s\n",
27521 - chtostr(result.module_rev, 8));
27522 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27523 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27524
27525 seq_printf(seq, "Serial number : ");
27526 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27527 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27528 return 0;
27529 }
27530
27531 - seq_printf(seq, "Device name : %s\n",
27532 - chtostr(result.device_name, 64));
27533 - seq_printf(seq, "Service name : %s\n",
27534 - chtostr(result.service_name, 64));
27535 - seq_printf(seq, "Physical name : %s\n",
27536 - chtostr(result.physical_location, 64));
27537 - seq_printf(seq, "Instance number : %s\n",
27538 - chtostr(result.instance_number, 4));
27539 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27540 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27541 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27542 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27543
27544 return 0;
27545 }
27546 diff -urNp linux-3.0.3/drivers/message/i2o/iop.c linux-3.0.3/drivers/message/i2o/iop.c
27547 --- linux-3.0.3/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27548 +++ linux-3.0.3/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27549 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27550
27551 spin_lock_irqsave(&c->context_list_lock, flags);
27552
27553 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27554 - atomic_inc(&c->context_list_counter);
27555 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27556 + atomic_inc_unchecked(&c->context_list_counter);
27557
27558 - entry->context = atomic_read(&c->context_list_counter);
27559 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27560
27561 list_add(&entry->list, &c->context_list);
27562
27563 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27564
27565 #if BITS_PER_LONG == 64
27566 spin_lock_init(&c->context_list_lock);
27567 - atomic_set(&c->context_list_counter, 0);
27568 + atomic_set_unchecked(&c->context_list_counter, 0);
27569 INIT_LIST_HEAD(&c->context_list);
27570 #endif
27571
27572 diff -urNp linux-3.0.3/drivers/mfd/abx500-core.c linux-3.0.3/drivers/mfd/abx500-core.c
27573 --- linux-3.0.3/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27574 +++ linux-3.0.3/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27575 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27576
27577 struct abx500_device_entry {
27578 struct list_head list;
27579 - struct abx500_ops ops;
27580 + abx500_ops_no_const ops;
27581 struct device *dev;
27582 };
27583
27584 diff -urNp linux-3.0.3/drivers/mfd/janz-cmodio.c linux-3.0.3/drivers/mfd/janz-cmodio.c
27585 --- linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27586 +++ linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27587 @@ -13,6 +13,7 @@
27588
27589 #include <linux/kernel.h>
27590 #include <linux/module.h>
27591 +#include <linux/slab.h>
27592 #include <linux/init.h>
27593 #include <linux/pci.h>
27594 #include <linux/interrupt.h>
27595 diff -urNp linux-3.0.3/drivers/mfd/wm8350-i2c.c linux-3.0.3/drivers/mfd/wm8350-i2c.c
27596 --- linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27597 +++ linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27598 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27599 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27600 int ret;
27601
27602 + pax_track_stack();
27603 +
27604 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27605 return -EINVAL;
27606
27607 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c
27608 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27609 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27610 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27611 * the lid is closed. This leads to interrupts as soon as a little move
27612 * is done.
27613 */
27614 - atomic_inc(&lis3_dev.count);
27615 + atomic_inc_unchecked(&lis3_dev.count);
27616
27617 wake_up_interruptible(&lis3_dev.misc_wait);
27618 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27619 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27620 if (lis3_dev.pm_dev)
27621 pm_runtime_get_sync(lis3_dev.pm_dev);
27622
27623 - atomic_set(&lis3_dev.count, 0);
27624 + atomic_set_unchecked(&lis3_dev.count, 0);
27625 return 0;
27626 }
27627
27628 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27629 add_wait_queue(&lis3_dev.misc_wait, &wait);
27630 while (true) {
27631 set_current_state(TASK_INTERRUPTIBLE);
27632 - data = atomic_xchg(&lis3_dev.count, 0);
27633 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27634 if (data)
27635 break;
27636
27637 @@ -583,7 +583,7 @@ out:
27638 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27639 {
27640 poll_wait(file, &lis3_dev.misc_wait, wait);
27641 - if (atomic_read(&lis3_dev.count))
27642 + if (atomic_read_unchecked(&lis3_dev.count))
27643 return POLLIN | POLLRDNORM;
27644 return 0;
27645 }
27646 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h
27647 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27648 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27649 @@ -265,7 +265,7 @@ struct lis3lv02d {
27650 struct input_polled_dev *idev; /* input device */
27651 struct platform_device *pdev; /* platform device */
27652 struct regulator_bulk_data regulators[2];
27653 - atomic_t count; /* interrupt count after last read */
27654 + atomic_unchecked_t count; /* interrupt count after last read */
27655 union axis_conversion ac; /* hw -> logical axis */
27656 int mapped_btns[3];
27657
27658 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c
27659 --- linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27660 +++ linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27661 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27662 unsigned long nsec;
27663
27664 nsec = CLKS2NSEC(clks);
27665 - atomic_long_inc(&mcs_op_statistics[op].count);
27666 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
27667 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27668 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27669 if (mcs_op_statistics[op].max < nsec)
27670 mcs_op_statistics[op].max = nsec;
27671 }
27672 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c
27673 --- linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27674 +++ linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27675 @@ -32,9 +32,9 @@
27676
27677 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27678
27679 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27680 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27681 {
27682 - unsigned long val = atomic_long_read(v);
27683 + unsigned long val = atomic_long_read_unchecked(v);
27684
27685 seq_printf(s, "%16lu %s\n", val, id);
27686 }
27687 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27688
27689 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27690 for (op = 0; op < mcsop_last; op++) {
27691 - count = atomic_long_read(&mcs_op_statistics[op].count);
27692 - total = atomic_long_read(&mcs_op_statistics[op].total);
27693 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27694 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27695 max = mcs_op_statistics[op].max;
27696 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27697 count ? total / count : 0, max);
27698 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/grutables.h linux-3.0.3/drivers/misc/sgi-gru/grutables.h
27699 --- linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27700 +++ linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27701 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27702 * GRU statistics.
27703 */
27704 struct gru_stats_s {
27705 - atomic_long_t vdata_alloc;
27706 - atomic_long_t vdata_free;
27707 - atomic_long_t gts_alloc;
27708 - atomic_long_t gts_free;
27709 - atomic_long_t gms_alloc;
27710 - atomic_long_t gms_free;
27711 - atomic_long_t gts_double_allocate;
27712 - atomic_long_t assign_context;
27713 - atomic_long_t assign_context_failed;
27714 - atomic_long_t free_context;
27715 - atomic_long_t load_user_context;
27716 - atomic_long_t load_kernel_context;
27717 - atomic_long_t lock_kernel_context;
27718 - atomic_long_t unlock_kernel_context;
27719 - atomic_long_t steal_user_context;
27720 - atomic_long_t steal_kernel_context;
27721 - atomic_long_t steal_context_failed;
27722 - atomic_long_t nopfn;
27723 - atomic_long_t asid_new;
27724 - atomic_long_t asid_next;
27725 - atomic_long_t asid_wrap;
27726 - atomic_long_t asid_reuse;
27727 - atomic_long_t intr;
27728 - atomic_long_t intr_cbr;
27729 - atomic_long_t intr_tfh;
27730 - atomic_long_t intr_spurious;
27731 - atomic_long_t intr_mm_lock_failed;
27732 - atomic_long_t call_os;
27733 - atomic_long_t call_os_wait_queue;
27734 - atomic_long_t user_flush_tlb;
27735 - atomic_long_t user_unload_context;
27736 - atomic_long_t user_exception;
27737 - atomic_long_t set_context_option;
27738 - atomic_long_t check_context_retarget_intr;
27739 - atomic_long_t check_context_unload;
27740 - atomic_long_t tlb_dropin;
27741 - atomic_long_t tlb_preload_page;
27742 - atomic_long_t tlb_dropin_fail_no_asid;
27743 - atomic_long_t tlb_dropin_fail_upm;
27744 - atomic_long_t tlb_dropin_fail_invalid;
27745 - atomic_long_t tlb_dropin_fail_range_active;
27746 - atomic_long_t tlb_dropin_fail_idle;
27747 - atomic_long_t tlb_dropin_fail_fmm;
27748 - atomic_long_t tlb_dropin_fail_no_exception;
27749 - atomic_long_t tfh_stale_on_fault;
27750 - atomic_long_t mmu_invalidate_range;
27751 - atomic_long_t mmu_invalidate_page;
27752 - atomic_long_t flush_tlb;
27753 - atomic_long_t flush_tlb_gru;
27754 - atomic_long_t flush_tlb_gru_tgh;
27755 - atomic_long_t flush_tlb_gru_zero_asid;
27756 -
27757 - atomic_long_t copy_gpa;
27758 - atomic_long_t read_gpa;
27759 -
27760 - atomic_long_t mesq_receive;
27761 - atomic_long_t mesq_receive_none;
27762 - atomic_long_t mesq_send;
27763 - atomic_long_t mesq_send_failed;
27764 - atomic_long_t mesq_noop;
27765 - atomic_long_t mesq_send_unexpected_error;
27766 - atomic_long_t mesq_send_lb_overflow;
27767 - atomic_long_t mesq_send_qlimit_reached;
27768 - atomic_long_t mesq_send_amo_nacked;
27769 - atomic_long_t mesq_send_put_nacked;
27770 - atomic_long_t mesq_page_overflow;
27771 - atomic_long_t mesq_qf_locked;
27772 - atomic_long_t mesq_qf_noop_not_full;
27773 - atomic_long_t mesq_qf_switch_head_failed;
27774 - atomic_long_t mesq_qf_unexpected_error;
27775 - atomic_long_t mesq_noop_unexpected_error;
27776 - atomic_long_t mesq_noop_lb_overflow;
27777 - atomic_long_t mesq_noop_qlimit_reached;
27778 - atomic_long_t mesq_noop_amo_nacked;
27779 - atomic_long_t mesq_noop_put_nacked;
27780 - atomic_long_t mesq_noop_page_overflow;
27781 + atomic_long_unchecked_t vdata_alloc;
27782 + atomic_long_unchecked_t vdata_free;
27783 + atomic_long_unchecked_t gts_alloc;
27784 + atomic_long_unchecked_t gts_free;
27785 + atomic_long_unchecked_t gms_alloc;
27786 + atomic_long_unchecked_t gms_free;
27787 + atomic_long_unchecked_t gts_double_allocate;
27788 + atomic_long_unchecked_t assign_context;
27789 + atomic_long_unchecked_t assign_context_failed;
27790 + atomic_long_unchecked_t free_context;
27791 + atomic_long_unchecked_t load_user_context;
27792 + atomic_long_unchecked_t load_kernel_context;
27793 + atomic_long_unchecked_t lock_kernel_context;
27794 + atomic_long_unchecked_t unlock_kernel_context;
27795 + atomic_long_unchecked_t steal_user_context;
27796 + atomic_long_unchecked_t steal_kernel_context;
27797 + atomic_long_unchecked_t steal_context_failed;
27798 + atomic_long_unchecked_t nopfn;
27799 + atomic_long_unchecked_t asid_new;
27800 + atomic_long_unchecked_t asid_next;
27801 + atomic_long_unchecked_t asid_wrap;
27802 + atomic_long_unchecked_t asid_reuse;
27803 + atomic_long_unchecked_t intr;
27804 + atomic_long_unchecked_t intr_cbr;
27805 + atomic_long_unchecked_t intr_tfh;
27806 + atomic_long_unchecked_t intr_spurious;
27807 + atomic_long_unchecked_t intr_mm_lock_failed;
27808 + atomic_long_unchecked_t call_os;
27809 + atomic_long_unchecked_t call_os_wait_queue;
27810 + atomic_long_unchecked_t user_flush_tlb;
27811 + atomic_long_unchecked_t user_unload_context;
27812 + atomic_long_unchecked_t user_exception;
27813 + atomic_long_unchecked_t set_context_option;
27814 + atomic_long_unchecked_t check_context_retarget_intr;
27815 + atomic_long_unchecked_t check_context_unload;
27816 + atomic_long_unchecked_t tlb_dropin;
27817 + atomic_long_unchecked_t tlb_preload_page;
27818 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
27819 + atomic_long_unchecked_t tlb_dropin_fail_upm;
27820 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
27821 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
27822 + atomic_long_unchecked_t tlb_dropin_fail_idle;
27823 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
27824 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
27825 + atomic_long_unchecked_t tfh_stale_on_fault;
27826 + atomic_long_unchecked_t mmu_invalidate_range;
27827 + atomic_long_unchecked_t mmu_invalidate_page;
27828 + atomic_long_unchecked_t flush_tlb;
27829 + atomic_long_unchecked_t flush_tlb_gru;
27830 + atomic_long_unchecked_t flush_tlb_gru_tgh;
27831 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
27832 +
27833 + atomic_long_unchecked_t copy_gpa;
27834 + atomic_long_unchecked_t read_gpa;
27835 +
27836 + atomic_long_unchecked_t mesq_receive;
27837 + atomic_long_unchecked_t mesq_receive_none;
27838 + atomic_long_unchecked_t mesq_send;
27839 + atomic_long_unchecked_t mesq_send_failed;
27840 + atomic_long_unchecked_t mesq_noop;
27841 + atomic_long_unchecked_t mesq_send_unexpected_error;
27842 + atomic_long_unchecked_t mesq_send_lb_overflow;
27843 + atomic_long_unchecked_t mesq_send_qlimit_reached;
27844 + atomic_long_unchecked_t mesq_send_amo_nacked;
27845 + atomic_long_unchecked_t mesq_send_put_nacked;
27846 + atomic_long_unchecked_t mesq_page_overflow;
27847 + atomic_long_unchecked_t mesq_qf_locked;
27848 + atomic_long_unchecked_t mesq_qf_noop_not_full;
27849 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
27850 + atomic_long_unchecked_t mesq_qf_unexpected_error;
27851 + atomic_long_unchecked_t mesq_noop_unexpected_error;
27852 + atomic_long_unchecked_t mesq_noop_lb_overflow;
27853 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
27854 + atomic_long_unchecked_t mesq_noop_amo_nacked;
27855 + atomic_long_unchecked_t mesq_noop_put_nacked;
27856 + atomic_long_unchecked_t mesq_noop_page_overflow;
27857
27858 };
27859
27860 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
27861 tghop_invalidate, mcsop_last};
27862
27863 struct mcs_op_statistic {
27864 - atomic_long_t count;
27865 - atomic_long_t total;
27866 + atomic_long_unchecked_t count;
27867 + atomic_long_unchecked_t total;
27868 unsigned long max;
27869 };
27870
27871 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
27872
27873 #define STAT(id) do { \
27874 if (gru_options & OPT_STATS) \
27875 - atomic_long_inc(&gru_stats.id); \
27876 + atomic_long_inc_unchecked(&gru_stats.id); \
27877 } while (0)
27878
27879 #ifdef CONFIG_SGI_GRU_DEBUG
27880 diff -urNp linux-3.0.3/drivers/misc/sgi-xp/xp.h linux-3.0.3/drivers/misc/sgi-xp/xp.h
27881 --- linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
27882 +++ linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
27883 @@ -289,7 +289,7 @@ struct xpc_interface {
27884 xpc_notify_func, void *);
27885 void (*received) (short, int, void *);
27886 enum xp_retval (*partid_to_nasids) (short, void *);
27887 -};
27888 +} __no_const;
27889
27890 extern struct xpc_interface xpc_interface;
27891
27892 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c
27893 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
27894 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
27895 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
27896 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
27897 unsigned long timeo = jiffies + HZ;
27898
27899 + pax_track_stack();
27900 +
27901 /* Prevent setting state FL_SYNCING for chip in suspended state. */
27902 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
27903 goto sleep;
27904 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
27905 unsigned long initial_adr;
27906 int initial_len = len;
27907
27908 + pax_track_stack();
27909 +
27910 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
27911 adr += chip->start;
27912 initial_adr = adr;
27913 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
27914 int retries = 3;
27915 int ret;
27916
27917 + pax_track_stack();
27918 +
27919 adr += chip->start;
27920
27921 retry:
27922 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c
27923 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
27924 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
27925 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
27926 unsigned long cmd_addr;
27927 struct cfi_private *cfi = map->fldrv_priv;
27928
27929 + pax_track_stack();
27930 +
27931 adr += chip->start;
27932
27933 /* Ensure cmd read/writes are aligned. */
27934 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
27935 DECLARE_WAITQUEUE(wait, current);
27936 int wbufsize, z;
27937
27938 + pax_track_stack();
27939 +
27940 /* M58LW064A requires bus alignment for buffer wriets -- saw */
27941 if (adr & (map_bankwidth(map)-1))
27942 return -EINVAL;
27943 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
27944 DECLARE_WAITQUEUE(wait, current);
27945 int ret = 0;
27946
27947 + pax_track_stack();
27948 +
27949 adr += chip->start;
27950
27951 /* Let's determine this according to the interleave only once */
27952 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
27953 unsigned long timeo = jiffies + HZ;
27954 DECLARE_WAITQUEUE(wait, current);
27955
27956 + pax_track_stack();
27957 +
27958 adr += chip->start;
27959
27960 /* Let's determine this according to the interleave only once */
27961 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
27962 unsigned long timeo = jiffies + HZ;
27963 DECLARE_WAITQUEUE(wait, current);
27964
27965 + pax_track_stack();
27966 +
27967 adr += chip->start;
27968
27969 /* Let's determine this according to the interleave only once */
27970 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2000.c linux-3.0.3/drivers/mtd/devices/doc2000.c
27971 --- linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
27972 +++ linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
27973 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
27974
27975 /* The ECC will not be calculated correctly if less than 512 is written */
27976 /* DBB-
27977 - if (len != 0x200 && eccbuf)
27978 + if (len != 0x200)
27979 printk(KERN_WARNING
27980 "ECC needs a full sector write (adr: %lx size %lx)\n",
27981 (long) to, (long) len);
27982 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2001.c linux-3.0.3/drivers/mtd/devices/doc2001.c
27983 --- linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
27984 +++ linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
27985 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
27986 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
27987
27988 /* Don't allow read past end of device */
27989 - if (from >= this->totlen)
27990 + if (from >= this->totlen || !len)
27991 return -EINVAL;
27992
27993 /* Don't allow a single read to cross a 512-byte block boundary */
27994 diff -urNp linux-3.0.3/drivers/mtd/ftl.c linux-3.0.3/drivers/mtd/ftl.c
27995 --- linux-3.0.3/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
27996 +++ linux-3.0.3/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
27997 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
27998 loff_t offset;
27999 uint16_t srcunitswap = cpu_to_le16(srcunit);
28000
28001 + pax_track_stack();
28002 +
28003 eun = &part->EUNInfo[srcunit];
28004 xfer = &part->XferInfo[xferunit];
28005 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28006 diff -urNp linux-3.0.3/drivers/mtd/inftlcore.c linux-3.0.3/drivers/mtd/inftlcore.c
28007 --- linux-3.0.3/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28008 +++ linux-3.0.3/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28009 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28010 struct inftl_oob oob;
28011 size_t retlen;
28012
28013 + pax_track_stack();
28014 +
28015 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28016 "pending=%d)\n", inftl, thisVUC, pendingblock);
28017
28018 diff -urNp linux-3.0.3/drivers/mtd/inftlmount.c linux-3.0.3/drivers/mtd/inftlmount.c
28019 --- linux-3.0.3/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28020 +++ linux-3.0.3/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28021 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28022 struct INFTLPartition *ip;
28023 size_t retlen;
28024
28025 + pax_track_stack();
28026 +
28027 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28028
28029 /*
28030 diff -urNp linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c
28031 --- linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28032 +++ linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28033 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28034 {
28035 map_word pfow_val[4];
28036
28037 + pax_track_stack();
28038 +
28039 /* Check identification string */
28040 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28041 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28042 diff -urNp linux-3.0.3/drivers/mtd/mtdchar.c linux-3.0.3/drivers/mtd/mtdchar.c
28043 --- linux-3.0.3/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28044 +++ linux-3.0.3/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28045 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28046 u_long size;
28047 struct mtd_info_user info;
28048
28049 + pax_track_stack();
28050 +
28051 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28052
28053 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28054 diff -urNp linux-3.0.3/drivers/mtd/nand/denali.c linux-3.0.3/drivers/mtd/nand/denali.c
28055 --- linux-3.0.3/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28056 +++ linux-3.0.3/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28057 @@ -26,6 +26,7 @@
28058 #include <linux/pci.h>
28059 #include <linux/mtd/mtd.h>
28060 #include <linux/module.h>
28061 +#include <linux/slab.h>
28062
28063 #include "denali.h"
28064
28065 diff -urNp linux-3.0.3/drivers/mtd/nftlcore.c linux-3.0.3/drivers/mtd/nftlcore.c
28066 --- linux-3.0.3/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28067 +++ linux-3.0.3/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28068 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28069 int inplace = 1;
28070 size_t retlen;
28071
28072 + pax_track_stack();
28073 +
28074 memset(BlockMap, 0xff, sizeof(BlockMap));
28075 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28076
28077 diff -urNp linux-3.0.3/drivers/mtd/nftlmount.c linux-3.0.3/drivers/mtd/nftlmount.c
28078 --- linux-3.0.3/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28079 +++ linux-3.0.3/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28080 @@ -24,6 +24,7 @@
28081 #include <asm/errno.h>
28082 #include <linux/delay.h>
28083 #include <linux/slab.h>
28084 +#include <linux/sched.h>
28085 #include <linux/mtd/mtd.h>
28086 #include <linux/mtd/nand.h>
28087 #include <linux/mtd/nftl.h>
28088 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28089 struct mtd_info *mtd = nftl->mbd.mtd;
28090 unsigned int i;
28091
28092 + pax_track_stack();
28093 +
28094 /* Assume logical EraseSize == physical erasesize for starting the scan.
28095 We'll sort it out later if we find a MediaHeader which says otherwise */
28096 /* Actually, we won't. The new DiskOnChip driver has already scanned
28097 diff -urNp linux-3.0.3/drivers/mtd/ubi/build.c linux-3.0.3/drivers/mtd/ubi/build.c
28098 --- linux-3.0.3/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28099 +++ linux-3.0.3/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28100 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28101 static int __init bytes_str_to_int(const char *str)
28102 {
28103 char *endp;
28104 - unsigned long result;
28105 + unsigned long result, scale = 1;
28106
28107 result = simple_strtoul(str, &endp, 0);
28108 if (str == endp || result >= INT_MAX) {
28109 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28110
28111 switch (*endp) {
28112 case 'G':
28113 - result *= 1024;
28114 + scale *= 1024;
28115 case 'M':
28116 - result *= 1024;
28117 + scale *= 1024;
28118 case 'K':
28119 - result *= 1024;
28120 + scale *= 1024;
28121 if (endp[1] == 'i' && endp[2] == 'B')
28122 endp += 2;
28123 case '\0':
28124 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28125 return -EINVAL;
28126 }
28127
28128 - return result;
28129 + if ((intoverflow_t)result*scale >= INT_MAX) {
28130 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28131 + str);
28132 + return -EINVAL;
28133 + }
28134 +
28135 + return result*scale;
28136 }
28137
28138 /**
28139 diff -urNp linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c
28140 --- linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28141 +++ linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28142 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28143 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28144 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28145
28146 -static struct bfa_ioc_hwif nw_hwif_ct;
28147 +static struct bfa_ioc_hwif nw_hwif_ct = {
28148 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28149 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28150 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28151 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28152 + .ioc_map_port = bfa_ioc_ct_map_port,
28153 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28154 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28155 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28156 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28157 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28158 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28159 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28160 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28161 +};
28162
28163 /**
28164 * Called from bfa_ioc_attach() to map asic specific calls.
28165 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28166 void
28167 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28168 {
28169 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28170 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28171 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28172 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28173 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28174 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28175 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28176 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28177 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28178 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28179 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28180 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28181 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28182 -
28183 ioc->ioc_hwif = &nw_hwif_ct;
28184 }
28185
28186 diff -urNp linux-3.0.3/drivers/net/bna/bnad.c linux-3.0.3/drivers/net/bna/bnad.c
28187 --- linux-3.0.3/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28188 +++ linux-3.0.3/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28189 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28190 struct bna_intr_info *intr_info =
28191 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28192 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28193 - struct bna_tx_event_cbfn tx_cbfn;
28194 + static struct bna_tx_event_cbfn tx_cbfn = {
28195 + /* Initialize the tx event handlers */
28196 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28197 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28198 + .tx_stall_cbfn = bnad_cb_tx_stall,
28199 + .tx_resume_cbfn = bnad_cb_tx_resume,
28200 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28201 + };
28202 struct bna_tx *tx;
28203 unsigned long flags;
28204
28205 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28206 tx_config->txq_depth = bnad->txq_depth;
28207 tx_config->tx_type = BNA_TX_T_REGULAR;
28208
28209 - /* Initialize the tx event handlers */
28210 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28211 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28212 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28213 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28214 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28215 -
28216 /* Get BNA's resource requirement for one tx object */
28217 spin_lock_irqsave(&bnad->bna_lock, flags);
28218 bna_tx_res_req(bnad->num_txq_per_tx,
28219 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28220 struct bna_intr_info *intr_info =
28221 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28222 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28223 - struct bna_rx_event_cbfn rx_cbfn;
28224 + static struct bna_rx_event_cbfn rx_cbfn = {
28225 + /* Initialize the Rx event handlers */
28226 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28227 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28228 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28229 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28230 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28231 + .rx_post_cbfn = bnad_cb_rx_post
28232 + };
28233 struct bna_rx *rx;
28234 unsigned long flags;
28235
28236 /* Initialize the Rx object configuration */
28237 bnad_init_rx_config(bnad, rx_config);
28238
28239 - /* Initialize the Rx event handlers */
28240 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28241 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28242 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28243 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28244 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28245 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28246 -
28247 /* Get BNA's resource requirement for one Rx object */
28248 spin_lock_irqsave(&bnad->bna_lock, flags);
28249 bna_rx_res_req(rx_config, res_info);
28250 diff -urNp linux-3.0.3/drivers/net/bnx2.c linux-3.0.3/drivers/net/bnx2.c
28251 --- linux-3.0.3/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28252 +++ linux-3.0.3/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28253 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28254 int rc = 0;
28255 u32 magic, csum;
28256
28257 + pax_track_stack();
28258 +
28259 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28260 goto test_nvram_done;
28261
28262 diff -urNp linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c
28263 --- linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28264 +++ linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28265 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28266 int i, rc;
28267 u32 magic, crc;
28268
28269 + pax_track_stack();
28270 +
28271 if (BP_NOMCP(bp))
28272 return 0;
28273
28274 diff -urNp linux-3.0.3/drivers/net/cxgb3/l2t.h linux-3.0.3/drivers/net/cxgb3/l2t.h
28275 --- linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28276 +++ linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28277 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28278 */
28279 struct l2t_skb_cb {
28280 arp_failure_handler_func arp_failure_handler;
28281 -};
28282 +} __no_const;
28283
28284 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28285
28286 diff -urNp linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c
28287 --- linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28288 +++ linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28289 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28290 unsigned int nchan = adap->params.nports;
28291 struct msix_entry entries[MAX_INGQ + 1];
28292
28293 + pax_track_stack();
28294 +
28295 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28296 entries[i].entry = i;
28297
28298 diff -urNp linux-3.0.3/drivers/net/cxgb4/t4_hw.c linux-3.0.3/drivers/net/cxgb4/t4_hw.c
28299 --- linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28300 +++ linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28301 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28302 u8 vpd[VPD_LEN], csum;
28303 unsigned int vpdr_len, kw_offset, id_len;
28304
28305 + pax_track_stack();
28306 +
28307 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28308 if (ret < 0)
28309 return ret;
28310 diff -urNp linux-3.0.3/drivers/net/e1000e/82571.c linux-3.0.3/drivers/net/e1000e/82571.c
28311 --- linux-3.0.3/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28312 +++ linux-3.0.3/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28313 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28314 {
28315 struct e1000_hw *hw = &adapter->hw;
28316 struct e1000_mac_info *mac = &hw->mac;
28317 - struct e1000_mac_operations *func = &mac->ops;
28318 + e1000_mac_operations_no_const *func = &mac->ops;
28319 u32 swsm = 0;
28320 u32 swsm2 = 0;
28321 bool force_clear_smbi = false;
28322 diff -urNp linux-3.0.3/drivers/net/e1000e/es2lan.c linux-3.0.3/drivers/net/e1000e/es2lan.c
28323 --- linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28324 +++ linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28325 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28326 {
28327 struct e1000_hw *hw = &adapter->hw;
28328 struct e1000_mac_info *mac = &hw->mac;
28329 - struct e1000_mac_operations *func = &mac->ops;
28330 + e1000_mac_operations_no_const *func = &mac->ops;
28331
28332 /* Set media type */
28333 switch (adapter->pdev->device) {
28334 diff -urNp linux-3.0.3/drivers/net/e1000e/hw.h linux-3.0.3/drivers/net/e1000e/hw.h
28335 --- linux-3.0.3/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28336 +++ linux-3.0.3/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28337 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28338 void (*write_vfta)(struct e1000_hw *, u32, u32);
28339 s32 (*read_mac_addr)(struct e1000_hw *);
28340 };
28341 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28342
28343 /* Function pointers for the PHY. */
28344 struct e1000_phy_operations {
28345 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28346 void (*power_up)(struct e1000_hw *);
28347 void (*power_down)(struct e1000_hw *);
28348 };
28349 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28350
28351 /* Function pointers for the NVM. */
28352 struct e1000_nvm_operations {
28353 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28354 s32 (*validate)(struct e1000_hw *);
28355 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28356 };
28357 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28358
28359 struct e1000_mac_info {
28360 - struct e1000_mac_operations ops;
28361 + e1000_mac_operations_no_const ops;
28362 u8 addr[ETH_ALEN];
28363 u8 perm_addr[ETH_ALEN];
28364
28365 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28366 };
28367
28368 struct e1000_phy_info {
28369 - struct e1000_phy_operations ops;
28370 + e1000_phy_operations_no_const ops;
28371
28372 enum e1000_phy_type type;
28373
28374 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28375 };
28376
28377 struct e1000_nvm_info {
28378 - struct e1000_nvm_operations ops;
28379 + e1000_nvm_operations_no_const ops;
28380
28381 enum e1000_nvm_type type;
28382 enum e1000_nvm_override override;
28383 diff -urNp linux-3.0.3/drivers/net/hamradio/6pack.c linux-3.0.3/drivers/net/hamradio/6pack.c
28384 --- linux-3.0.3/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28385 +++ linux-3.0.3/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28386 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28387 unsigned char buf[512];
28388 int count1;
28389
28390 + pax_track_stack();
28391 +
28392 if (!count)
28393 return;
28394
28395 diff -urNp linux-3.0.3/drivers/net/igb/e1000_hw.h linux-3.0.3/drivers/net/igb/e1000_hw.h
28396 --- linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28397 +++ linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28398 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28399 s32 (*read_mac_addr)(struct e1000_hw *);
28400 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28401 };
28402 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28403
28404 struct e1000_phy_operations {
28405 s32 (*acquire)(struct e1000_hw *);
28406 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28407 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28408 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28409 };
28410 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28411
28412 struct e1000_nvm_operations {
28413 s32 (*acquire)(struct e1000_hw *);
28414 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28415 s32 (*update)(struct e1000_hw *);
28416 s32 (*validate)(struct e1000_hw *);
28417 };
28418 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28419
28420 struct e1000_info {
28421 s32 (*get_invariants)(struct e1000_hw *);
28422 @@ -350,7 +353,7 @@ struct e1000_info {
28423 extern const struct e1000_info e1000_82575_info;
28424
28425 struct e1000_mac_info {
28426 - struct e1000_mac_operations ops;
28427 + e1000_mac_operations_no_const ops;
28428
28429 u8 addr[6];
28430 u8 perm_addr[6];
28431 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28432 };
28433
28434 struct e1000_phy_info {
28435 - struct e1000_phy_operations ops;
28436 + e1000_phy_operations_no_const ops;
28437
28438 enum e1000_phy_type type;
28439
28440 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28441 };
28442
28443 struct e1000_nvm_info {
28444 - struct e1000_nvm_operations ops;
28445 + e1000_nvm_operations_no_const ops;
28446 enum e1000_nvm_type type;
28447 enum e1000_nvm_override override;
28448
28449 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28450 s32 (*check_for_ack)(struct e1000_hw *, u16);
28451 s32 (*check_for_rst)(struct e1000_hw *, u16);
28452 };
28453 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28454
28455 struct e1000_mbx_stats {
28456 u32 msgs_tx;
28457 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28458 };
28459
28460 struct e1000_mbx_info {
28461 - struct e1000_mbx_operations ops;
28462 + e1000_mbx_operations_no_const ops;
28463 struct e1000_mbx_stats stats;
28464 u32 timeout;
28465 u32 usec_delay;
28466 diff -urNp linux-3.0.3/drivers/net/igbvf/vf.h linux-3.0.3/drivers/net/igbvf/vf.h
28467 --- linux-3.0.3/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28468 +++ linux-3.0.3/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28469 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28470 s32 (*read_mac_addr)(struct e1000_hw *);
28471 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28472 };
28473 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28474
28475 struct e1000_mac_info {
28476 - struct e1000_mac_operations ops;
28477 + e1000_mac_operations_no_const ops;
28478 u8 addr[6];
28479 u8 perm_addr[6];
28480
28481 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28482 s32 (*check_for_ack)(struct e1000_hw *);
28483 s32 (*check_for_rst)(struct e1000_hw *);
28484 };
28485 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28486
28487 struct e1000_mbx_stats {
28488 u32 msgs_tx;
28489 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28490 };
28491
28492 struct e1000_mbx_info {
28493 - struct e1000_mbx_operations ops;
28494 + e1000_mbx_operations_no_const ops;
28495 struct e1000_mbx_stats stats;
28496 u32 timeout;
28497 u32 usec_delay;
28498 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_main.c linux-3.0.3/drivers/net/ixgb/ixgb_main.c
28499 --- linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28500 +++ linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28501 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28502 u32 rctl;
28503 int i;
28504
28505 + pax_track_stack();
28506 +
28507 /* Check for Promiscuous and All Multicast modes */
28508
28509 rctl = IXGB_READ_REG(hw, RCTL);
28510 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_param.c linux-3.0.3/drivers/net/ixgb/ixgb_param.c
28511 --- linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28512 +++ linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28513 @@ -261,6 +261,9 @@ void __devinit
28514 ixgb_check_options(struct ixgb_adapter *adapter)
28515 {
28516 int bd = adapter->bd_number;
28517 +
28518 + pax_track_stack();
28519 +
28520 if (bd >= IXGB_MAX_NIC) {
28521 pr_notice("Warning: no configuration for board #%i\n", bd);
28522 pr_notice("Using defaults for all values\n");
28523 diff -urNp linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h
28524 --- linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28525 +++ linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28526 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28527 s32 (*update_checksum)(struct ixgbe_hw *);
28528 u16 (*calc_checksum)(struct ixgbe_hw *);
28529 };
28530 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28531
28532 struct ixgbe_mac_operations {
28533 s32 (*init_hw)(struct ixgbe_hw *);
28534 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28535 /* Flow Control */
28536 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28537 };
28538 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28539
28540 struct ixgbe_phy_operations {
28541 s32 (*identify)(struct ixgbe_hw *);
28542 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28543 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28544 s32 (*check_overtemp)(struct ixgbe_hw *);
28545 };
28546 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28547
28548 struct ixgbe_eeprom_info {
28549 - struct ixgbe_eeprom_operations ops;
28550 + ixgbe_eeprom_operations_no_const ops;
28551 enum ixgbe_eeprom_type type;
28552 u32 semaphore_delay;
28553 u16 word_size;
28554 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28555
28556 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28557 struct ixgbe_mac_info {
28558 - struct ixgbe_mac_operations ops;
28559 + ixgbe_mac_operations_no_const ops;
28560 enum ixgbe_mac_type type;
28561 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28562 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28563 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28564 };
28565
28566 struct ixgbe_phy_info {
28567 - struct ixgbe_phy_operations ops;
28568 + ixgbe_phy_operations_no_const ops;
28569 struct mdio_if_info mdio;
28570 enum ixgbe_phy_type type;
28571 u32 id;
28572 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28573 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28574 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28575 };
28576 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28577
28578 struct ixgbe_mbx_stats {
28579 u32 msgs_tx;
28580 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28581 };
28582
28583 struct ixgbe_mbx_info {
28584 - struct ixgbe_mbx_operations ops;
28585 + ixgbe_mbx_operations_no_const ops;
28586 struct ixgbe_mbx_stats stats;
28587 u32 timeout;
28588 u32 usec_delay;
28589 diff -urNp linux-3.0.3/drivers/net/ixgbevf/vf.h linux-3.0.3/drivers/net/ixgbevf/vf.h
28590 --- linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28591 +++ linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28592 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28593 s32 (*clear_vfta)(struct ixgbe_hw *);
28594 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28595 };
28596 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28597
28598 enum ixgbe_mac_type {
28599 ixgbe_mac_unknown = 0,
28600 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28601 };
28602
28603 struct ixgbe_mac_info {
28604 - struct ixgbe_mac_operations ops;
28605 + ixgbe_mac_operations_no_const ops;
28606 u8 addr[6];
28607 u8 perm_addr[6];
28608
28609 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28610 s32 (*check_for_ack)(struct ixgbe_hw *);
28611 s32 (*check_for_rst)(struct ixgbe_hw *);
28612 };
28613 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28614
28615 struct ixgbe_mbx_stats {
28616 u32 msgs_tx;
28617 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28618 };
28619
28620 struct ixgbe_mbx_info {
28621 - struct ixgbe_mbx_operations ops;
28622 + ixgbe_mbx_operations_no_const ops;
28623 struct ixgbe_mbx_stats stats;
28624 u32 timeout;
28625 u32 udelay;
28626 diff -urNp linux-3.0.3/drivers/net/ksz884x.c linux-3.0.3/drivers/net/ksz884x.c
28627 --- linux-3.0.3/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28628 +++ linux-3.0.3/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28629 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28630 int rc;
28631 u64 counter[TOTAL_PORT_COUNTER_NUM];
28632
28633 + pax_track_stack();
28634 +
28635 mutex_lock(&hw_priv->lock);
28636 n = SWITCH_PORT_NUM;
28637 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28638 diff -urNp linux-3.0.3/drivers/net/mlx4/main.c linux-3.0.3/drivers/net/mlx4/main.c
28639 --- linux-3.0.3/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28640 +++ linux-3.0.3/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28641 @@ -40,6 +40,7 @@
28642 #include <linux/dma-mapping.h>
28643 #include <linux/slab.h>
28644 #include <linux/io-mapping.h>
28645 +#include <linux/sched.h>
28646
28647 #include <linux/mlx4/device.h>
28648 #include <linux/mlx4/doorbell.h>
28649 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28650 u64 icm_size;
28651 int err;
28652
28653 + pax_track_stack();
28654 +
28655 err = mlx4_QUERY_FW(dev);
28656 if (err) {
28657 if (err == -EACCES)
28658 diff -urNp linux-3.0.3/drivers/net/niu.c linux-3.0.3/drivers/net/niu.c
28659 --- linux-3.0.3/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28660 +++ linux-3.0.3/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28661 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28662 int i, num_irqs, err;
28663 u8 first_ldg;
28664
28665 + pax_track_stack();
28666 +
28667 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28668 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28669 ldg_num_map[i] = first_ldg + i;
28670 diff -urNp linux-3.0.3/drivers/net/pcnet32.c linux-3.0.3/drivers/net/pcnet32.c
28671 --- linux-3.0.3/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28672 +++ linux-3.0.3/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28673 @@ -82,7 +82,7 @@ static int cards_found;
28674 /*
28675 * VLB I/O addresses
28676 */
28677 -static unsigned int pcnet32_portlist[] __initdata =
28678 +static unsigned int pcnet32_portlist[] __devinitdata =
28679 { 0x300, 0x320, 0x340, 0x360, 0 };
28680
28681 static int pcnet32_debug;
28682 @@ -270,7 +270,7 @@ struct pcnet32_private {
28683 struct sk_buff **rx_skbuff;
28684 dma_addr_t *tx_dma_addr;
28685 dma_addr_t *rx_dma_addr;
28686 - struct pcnet32_access a;
28687 + struct pcnet32_access *a;
28688 spinlock_t lock; /* Guard lock */
28689 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28690 unsigned int rx_ring_size; /* current rx ring size */
28691 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28692 u16 val;
28693
28694 netif_wake_queue(dev);
28695 - val = lp->a.read_csr(ioaddr, CSR3);
28696 + val = lp->a->read_csr(ioaddr, CSR3);
28697 val &= 0x00ff;
28698 - lp->a.write_csr(ioaddr, CSR3, val);
28699 + lp->a->write_csr(ioaddr, CSR3, val);
28700 napi_enable(&lp->napi);
28701 }
28702
28703 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28704 r = mii_link_ok(&lp->mii_if);
28705 } else if (lp->chip_version >= PCNET32_79C970A) {
28706 ulong ioaddr = dev->base_addr; /* card base I/O address */
28707 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28708 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28709 } else { /* can not detect link on really old chips */
28710 r = 1;
28711 }
28712 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28713 pcnet32_netif_stop(dev);
28714
28715 spin_lock_irqsave(&lp->lock, flags);
28716 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28717 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28718
28719 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28720
28721 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28722 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28723 {
28724 struct pcnet32_private *lp = netdev_priv(dev);
28725 - struct pcnet32_access *a = &lp->a; /* access to registers */
28726 + struct pcnet32_access *a = lp->a; /* access to registers */
28727 ulong ioaddr = dev->base_addr; /* card base I/O address */
28728 struct sk_buff *skb; /* sk buff */
28729 int x, i; /* counters */
28730 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28731 pcnet32_netif_stop(dev);
28732
28733 spin_lock_irqsave(&lp->lock, flags);
28734 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28735 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28736
28737 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28738
28739 /* Reset the PCNET32 */
28740 - lp->a.reset(ioaddr);
28741 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28742 + lp->a->reset(ioaddr);
28743 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28744
28745 /* switch pcnet32 to 32bit mode */
28746 - lp->a.write_bcr(ioaddr, 20, 2);
28747 + lp->a->write_bcr(ioaddr, 20, 2);
28748
28749 /* purge & init rings but don't actually restart */
28750 pcnet32_restart(dev, 0x0000);
28751
28752 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28753 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28754
28755 /* Initialize Transmit buffers. */
28756 size = data_len + 15;
28757 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28758
28759 /* set int loopback in CSR15 */
28760 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28761 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28762 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28763
28764 teststatus = cpu_to_le16(0x8000);
28765 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28766 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28767
28768 /* Check status of descriptors */
28769 for (x = 0; x < numbuffs; x++) {
28770 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28771 }
28772 }
28773
28774 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28775 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28776 wmb();
28777 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28778 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28779 @@ -1015,7 +1015,7 @@ clean_up:
28780 pcnet32_restart(dev, CSR0_NORMAL);
28781 } else {
28782 pcnet32_purge_rx_ring(dev);
28783 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28784 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28785 }
28786 spin_unlock_irqrestore(&lp->lock, flags);
28787
28788 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28789 enum ethtool_phys_id_state state)
28790 {
28791 struct pcnet32_private *lp = netdev_priv(dev);
28792 - struct pcnet32_access *a = &lp->a;
28793 + struct pcnet32_access *a = lp->a;
28794 ulong ioaddr = dev->base_addr;
28795 unsigned long flags;
28796 int i;
28797 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28798 {
28799 int csr5;
28800 struct pcnet32_private *lp = netdev_priv(dev);
28801 - struct pcnet32_access *a = &lp->a;
28802 + struct pcnet32_access *a = lp->a;
28803 ulong ioaddr = dev->base_addr;
28804 int ticks;
28805
28806 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
28807 spin_lock_irqsave(&lp->lock, flags);
28808 if (pcnet32_tx(dev)) {
28809 /* reset the chip to clear the error condition, then restart */
28810 - lp->a.reset(ioaddr);
28811 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28812 + lp->a->reset(ioaddr);
28813 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28814 pcnet32_restart(dev, CSR0_START);
28815 netif_wake_queue(dev);
28816 }
28817 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
28818 __napi_complete(napi);
28819
28820 /* clear interrupt masks */
28821 - val = lp->a.read_csr(ioaddr, CSR3);
28822 + val = lp->a->read_csr(ioaddr, CSR3);
28823 val &= 0x00ff;
28824 - lp->a.write_csr(ioaddr, CSR3, val);
28825 + lp->a->write_csr(ioaddr, CSR3, val);
28826
28827 /* Set interrupt enable. */
28828 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
28829 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
28830
28831 spin_unlock_irqrestore(&lp->lock, flags);
28832 }
28833 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
28834 int i, csr0;
28835 u16 *buff = ptr;
28836 struct pcnet32_private *lp = netdev_priv(dev);
28837 - struct pcnet32_access *a = &lp->a;
28838 + struct pcnet32_access *a = lp->a;
28839 ulong ioaddr = dev->base_addr;
28840 unsigned long flags;
28841
28842 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
28843 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
28844 if (lp->phymask & (1 << j)) {
28845 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
28846 - lp->a.write_bcr(ioaddr, 33,
28847 + lp->a->write_bcr(ioaddr, 33,
28848 (j << 5) | i);
28849 - *buff++ = lp->a.read_bcr(ioaddr, 34);
28850 + *buff++ = lp->a->read_bcr(ioaddr, 34);
28851 }
28852 }
28853 }
28854 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
28855 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
28856 lp->options |= PCNET32_PORT_FD;
28857
28858 - lp->a = *a;
28859 + lp->a = a;
28860
28861 /* prior to register_netdev, dev->name is not yet correct */
28862 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
28863 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
28864 if (lp->mii) {
28865 /* lp->phycount and lp->phymask are set to 0 by memset above */
28866
28867 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
28868 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
28869 /* scan for PHYs */
28870 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
28871 unsigned short id1, id2;
28872 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
28873 pr_info("Found PHY %04x:%04x at address %d\n",
28874 id1, id2, i);
28875 }
28876 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
28877 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
28878 if (lp->phycount > 1)
28879 lp->options |= PCNET32_PORT_MII;
28880 }
28881 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
28882 }
28883
28884 /* Reset the PCNET32 */
28885 - lp->a.reset(ioaddr);
28886 + lp->a->reset(ioaddr);
28887
28888 /* switch pcnet32 to 32bit mode */
28889 - lp->a.write_bcr(ioaddr, 20, 2);
28890 + lp->a->write_bcr(ioaddr, 20, 2);
28891
28892 netif_printk(lp, ifup, KERN_DEBUG, dev,
28893 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
28894 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
28895 (u32) (lp->init_dma_addr));
28896
28897 /* set/reset autoselect bit */
28898 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
28899 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
28900 if (lp->options & PCNET32_PORT_ASEL)
28901 val |= 2;
28902 - lp->a.write_bcr(ioaddr, 2, val);
28903 + lp->a->write_bcr(ioaddr, 2, val);
28904
28905 /* handle full duplex setting */
28906 if (lp->mii_if.full_duplex) {
28907 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
28908 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
28909 if (lp->options & PCNET32_PORT_FD) {
28910 val |= 1;
28911 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
28912 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
28913 if (lp->chip_version == 0x2627)
28914 val |= 3;
28915 }
28916 - lp->a.write_bcr(ioaddr, 9, val);
28917 + lp->a->write_bcr(ioaddr, 9, val);
28918 }
28919
28920 /* set/reset GPSI bit in test register */
28921 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
28922 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
28923 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
28924 val |= 0x10;
28925 - lp->a.write_csr(ioaddr, 124, val);
28926 + lp->a->write_csr(ioaddr, 124, val);
28927
28928 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
28929 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
28930 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
28931 * duplex, and/or enable auto negotiation, and clear DANAS
28932 */
28933 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
28934 - lp->a.write_bcr(ioaddr, 32,
28935 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
28936 + lp->a->write_bcr(ioaddr, 32,
28937 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
28938 /* disable Auto Negotiation, set 10Mpbs, HD */
28939 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
28940 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
28941 if (lp->options & PCNET32_PORT_FD)
28942 val |= 0x10;
28943 if (lp->options & PCNET32_PORT_100)
28944 val |= 0x08;
28945 - lp->a.write_bcr(ioaddr, 32, val);
28946 + lp->a->write_bcr(ioaddr, 32, val);
28947 } else {
28948 if (lp->options & PCNET32_PORT_ASEL) {
28949 - lp->a.write_bcr(ioaddr, 32,
28950 - lp->a.read_bcr(ioaddr,
28951 + lp->a->write_bcr(ioaddr, 32,
28952 + lp->a->read_bcr(ioaddr,
28953 32) | 0x0080);
28954 /* enable auto negotiate, setup, disable fd */
28955 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
28956 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
28957 val |= 0x20;
28958 - lp->a.write_bcr(ioaddr, 32, val);
28959 + lp->a->write_bcr(ioaddr, 32, val);
28960 }
28961 }
28962 } else {
28963 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
28964 * There is really no good other way to handle multiple PHYs
28965 * other than turning off all automatics
28966 */
28967 - val = lp->a.read_bcr(ioaddr, 2);
28968 - lp->a.write_bcr(ioaddr, 2, val & ~2);
28969 - val = lp->a.read_bcr(ioaddr, 32);
28970 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
28971 + val = lp->a->read_bcr(ioaddr, 2);
28972 + lp->a->write_bcr(ioaddr, 2, val & ~2);
28973 + val = lp->a->read_bcr(ioaddr, 32);
28974 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
28975
28976 if (!(lp->options & PCNET32_PORT_ASEL)) {
28977 /* setup ecmd */
28978 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
28979 ethtool_cmd_speed_set(&ecmd,
28980 (lp->options & PCNET32_PORT_100) ?
28981 SPEED_100 : SPEED_10);
28982 - bcr9 = lp->a.read_bcr(ioaddr, 9);
28983 + bcr9 = lp->a->read_bcr(ioaddr, 9);
28984
28985 if (lp->options & PCNET32_PORT_FD) {
28986 ecmd.duplex = DUPLEX_FULL;
28987 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
28988 ecmd.duplex = DUPLEX_HALF;
28989 bcr9 |= ~(1 << 0);
28990 }
28991 - lp->a.write_bcr(ioaddr, 9, bcr9);
28992 + lp->a->write_bcr(ioaddr, 9, bcr9);
28993 }
28994
28995 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
28996 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
28997
28998 #ifdef DO_DXSUFLO
28999 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29000 - val = lp->a.read_csr(ioaddr, CSR3);
29001 + val = lp->a->read_csr(ioaddr, CSR3);
29002 val |= 0x40;
29003 - lp->a.write_csr(ioaddr, CSR3, val);
29004 + lp->a->write_csr(ioaddr, CSR3, val);
29005 }
29006 #endif
29007
29008 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29009 napi_enable(&lp->napi);
29010
29011 /* Re-initialize the PCNET32, and start it when done. */
29012 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29013 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29014 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29015 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29016
29017 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29018 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29019 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29020 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29021
29022 netif_start_queue(dev);
29023
29024 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29025
29026 i = 0;
29027 while (i++ < 100)
29028 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29029 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29030 break;
29031 /*
29032 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29033 * reports that doing so triggers a bug in the '974.
29034 */
29035 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29036 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29037
29038 netif_printk(lp, ifup, KERN_DEBUG, dev,
29039 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29040 i,
29041 (u32) (lp->init_dma_addr),
29042 - lp->a.read_csr(ioaddr, CSR0));
29043 + lp->a->read_csr(ioaddr, CSR0));
29044
29045 spin_unlock_irqrestore(&lp->lock, flags);
29046
29047 @@ -2218,7 +2218,7 @@ err_free_ring:
29048 * Switch back to 16bit mode to avoid problems with dumb
29049 * DOS packet driver after a warm reboot
29050 */
29051 - lp->a.write_bcr(ioaddr, 20, 4);
29052 + lp->a->write_bcr(ioaddr, 20, 4);
29053
29054 err_free_irq:
29055 spin_unlock_irqrestore(&lp->lock, flags);
29056 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29057
29058 /* wait for stop */
29059 for (i = 0; i < 100; i++)
29060 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29061 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29062 break;
29063
29064 if (i >= 100)
29065 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29066 return;
29067
29068 /* ReInit Ring */
29069 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29070 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29071 i = 0;
29072 while (i++ < 1000)
29073 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29074 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29075 break;
29076
29077 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29078 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29079 }
29080
29081 static void pcnet32_tx_timeout(struct net_device *dev)
29082 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29083 /* Transmitter timeout, serious problems. */
29084 if (pcnet32_debug & NETIF_MSG_DRV)
29085 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29086 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29087 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29088 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29089 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29090 dev->stats.tx_errors++;
29091 if (netif_msg_tx_err(lp)) {
29092 int i;
29093 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29094
29095 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29096 "%s() called, csr0 %4.4x\n",
29097 - __func__, lp->a.read_csr(ioaddr, CSR0));
29098 + __func__, lp->a->read_csr(ioaddr, CSR0));
29099
29100 /* Default status -- will not enable Successful-TxDone
29101 * interrupt when that option is available to us.
29102 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29103 dev->stats.tx_bytes += skb->len;
29104
29105 /* Trigger an immediate send poll. */
29106 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29107 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29108
29109 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29110 lp->tx_full = 1;
29111 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29112
29113 spin_lock(&lp->lock);
29114
29115 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29116 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29117 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29118 if (csr0 == 0xffff)
29119 break; /* PCMCIA remove happened */
29120 /* Acknowledge all of the current interrupt sources ASAP. */
29121 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29122 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29123
29124 netif_printk(lp, intr, KERN_DEBUG, dev,
29125 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29126 - csr0, lp->a.read_csr(ioaddr, CSR0));
29127 + csr0, lp->a->read_csr(ioaddr, CSR0));
29128
29129 /* Log misc errors. */
29130 if (csr0 & 0x4000)
29131 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29132 if (napi_schedule_prep(&lp->napi)) {
29133 u16 val;
29134 /* set interrupt masks */
29135 - val = lp->a.read_csr(ioaddr, CSR3);
29136 + val = lp->a->read_csr(ioaddr, CSR3);
29137 val |= 0x5f00;
29138 - lp->a.write_csr(ioaddr, CSR3, val);
29139 + lp->a->write_csr(ioaddr, CSR3, val);
29140
29141 __napi_schedule(&lp->napi);
29142 break;
29143 }
29144 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29145 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29146 }
29147
29148 netif_printk(lp, intr, KERN_DEBUG, dev,
29149 "exiting interrupt, csr0=%#4.4x\n",
29150 - lp->a.read_csr(ioaddr, CSR0));
29151 + lp->a->read_csr(ioaddr, CSR0));
29152
29153 spin_unlock(&lp->lock);
29154
29155 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29156
29157 spin_lock_irqsave(&lp->lock, flags);
29158
29159 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29160 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29161
29162 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29163 "Shutting down ethercard, status was %2.2x\n",
29164 - lp->a.read_csr(ioaddr, CSR0));
29165 + lp->a->read_csr(ioaddr, CSR0));
29166
29167 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29168 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29169 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29170
29171 /*
29172 * Switch back to 16bit mode to avoid problems with dumb
29173 * DOS packet driver after a warm reboot
29174 */
29175 - lp->a.write_bcr(ioaddr, 20, 4);
29176 + lp->a->write_bcr(ioaddr, 20, 4);
29177
29178 spin_unlock_irqrestore(&lp->lock, flags);
29179
29180 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29181 unsigned long flags;
29182
29183 spin_lock_irqsave(&lp->lock, flags);
29184 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29185 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29186 spin_unlock_irqrestore(&lp->lock, flags);
29187
29188 return &dev->stats;
29189 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29190 if (dev->flags & IFF_ALLMULTI) {
29191 ib->filter[0] = cpu_to_le32(~0U);
29192 ib->filter[1] = cpu_to_le32(~0U);
29193 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29194 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29195 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29196 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29197 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29198 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29199 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29200 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29201 return;
29202 }
29203 /* clear the multicast filter */
29204 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29205 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29206 }
29207 for (i = 0; i < 4; i++)
29208 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29209 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29210 le16_to_cpu(mcast_table[i]));
29211 }
29212
29213 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29214
29215 spin_lock_irqsave(&lp->lock, flags);
29216 suspended = pcnet32_suspend(dev, &flags, 0);
29217 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29218 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29219 if (dev->flags & IFF_PROMISC) {
29220 /* Log any net taps. */
29221 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29222 lp->init_block->mode =
29223 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29224 7);
29225 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29226 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29227 } else {
29228 lp->init_block->mode =
29229 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29230 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29231 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29232 pcnet32_load_multicast(dev);
29233 }
29234
29235 if (suspended) {
29236 int csr5;
29237 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29238 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29239 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29240 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29241 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29242 } else {
29243 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29244 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29245 pcnet32_restart(dev, CSR0_NORMAL);
29246 netif_wake_queue(dev);
29247 }
29248 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29249 if (!lp->mii)
29250 return 0;
29251
29252 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29253 - val_out = lp->a.read_bcr(ioaddr, 34);
29254 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29255 + val_out = lp->a->read_bcr(ioaddr, 34);
29256
29257 return val_out;
29258 }
29259 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29260 if (!lp->mii)
29261 return;
29262
29263 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29264 - lp->a.write_bcr(ioaddr, 34, val);
29265 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29266 + lp->a->write_bcr(ioaddr, 34, val);
29267 }
29268
29269 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29270 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29271 curr_link = mii_link_ok(&lp->mii_if);
29272 } else {
29273 ulong ioaddr = dev->base_addr; /* card base I/O address */
29274 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29275 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29276 }
29277 if (!curr_link) {
29278 if (prev_link || verbose) {
29279 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29280 (ecmd.duplex == DUPLEX_FULL)
29281 ? "full" : "half");
29282 }
29283 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29284 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29285 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29286 if (lp->mii_if.full_duplex)
29287 bcr9 |= (1 << 0);
29288 else
29289 bcr9 &= ~(1 << 0);
29290 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29291 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29292 }
29293 } else {
29294 netif_info(lp, link, dev, "link up\n");
29295 diff -urNp linux-3.0.3/drivers/net/ppp_generic.c linux-3.0.3/drivers/net/ppp_generic.c
29296 --- linux-3.0.3/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29297 +++ linux-3.0.3/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29298 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29299 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29300 struct ppp_stats stats;
29301 struct ppp_comp_stats cstats;
29302 - char *vers;
29303
29304 switch (cmd) {
29305 case SIOCGPPPSTATS:
29306 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29307 break;
29308
29309 case SIOCGPPPVER:
29310 - vers = PPP_VERSION;
29311 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29312 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29313 break;
29314 err = 0;
29315 break;
29316 diff -urNp linux-3.0.3/drivers/net/r8169.c linux-3.0.3/drivers/net/r8169.c
29317 --- linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29318 +++ linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29319 @@ -645,12 +645,12 @@ struct rtl8169_private {
29320 struct mdio_ops {
29321 void (*write)(void __iomem *, int, int);
29322 int (*read)(void __iomem *, int);
29323 - } mdio_ops;
29324 + } __no_const mdio_ops;
29325
29326 struct pll_power_ops {
29327 void (*down)(struct rtl8169_private *);
29328 void (*up)(struct rtl8169_private *);
29329 - } pll_power_ops;
29330 + } __no_const pll_power_ops;
29331
29332 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29333 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29334 diff -urNp linux-3.0.3/drivers/net/tg3.h linux-3.0.3/drivers/net/tg3.h
29335 --- linux-3.0.3/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29336 +++ linux-3.0.3/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29337 @@ -134,6 +134,7 @@
29338 #define CHIPREV_ID_5750_A0 0x4000
29339 #define CHIPREV_ID_5750_A1 0x4001
29340 #define CHIPREV_ID_5750_A3 0x4003
29341 +#define CHIPREV_ID_5750_C1 0x4201
29342 #define CHIPREV_ID_5750_C2 0x4202
29343 #define CHIPREV_ID_5752_A0_HW 0x5000
29344 #define CHIPREV_ID_5752_A0 0x6000
29345 diff -urNp linux-3.0.3/drivers/net/tokenring/abyss.c linux-3.0.3/drivers/net/tokenring/abyss.c
29346 --- linux-3.0.3/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29347 +++ linux-3.0.3/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29348 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29349
29350 static int __init abyss_init (void)
29351 {
29352 - abyss_netdev_ops = tms380tr_netdev_ops;
29353 + pax_open_kernel();
29354 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29355
29356 - abyss_netdev_ops.ndo_open = abyss_open;
29357 - abyss_netdev_ops.ndo_stop = abyss_close;
29358 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29359 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29360 + pax_close_kernel();
29361
29362 return pci_register_driver(&abyss_driver);
29363 }
29364 diff -urNp linux-3.0.3/drivers/net/tokenring/madgemc.c linux-3.0.3/drivers/net/tokenring/madgemc.c
29365 --- linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29366 +++ linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29367 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29368
29369 static int __init madgemc_init (void)
29370 {
29371 - madgemc_netdev_ops = tms380tr_netdev_ops;
29372 - madgemc_netdev_ops.ndo_open = madgemc_open;
29373 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29374 + pax_open_kernel();
29375 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29376 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29377 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29378 + pax_close_kernel();
29379
29380 return mca_register_driver (&madgemc_driver);
29381 }
29382 diff -urNp linux-3.0.3/drivers/net/tokenring/proteon.c linux-3.0.3/drivers/net/tokenring/proteon.c
29383 --- linux-3.0.3/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29384 +++ linux-3.0.3/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29385 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29386 struct platform_device *pdev;
29387 int i, num = 0, err = 0;
29388
29389 - proteon_netdev_ops = tms380tr_netdev_ops;
29390 - proteon_netdev_ops.ndo_open = proteon_open;
29391 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29392 + pax_open_kernel();
29393 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29394 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29395 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29396 + pax_close_kernel();
29397
29398 err = platform_driver_register(&proteon_driver);
29399 if (err)
29400 diff -urNp linux-3.0.3/drivers/net/tokenring/skisa.c linux-3.0.3/drivers/net/tokenring/skisa.c
29401 --- linux-3.0.3/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29402 +++ linux-3.0.3/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29403 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29404 struct platform_device *pdev;
29405 int i, num = 0, err = 0;
29406
29407 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29408 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29409 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29410 + pax_open_kernel();
29411 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29412 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29413 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29414 + pax_close_kernel();
29415
29416 err = platform_driver_register(&sk_isa_driver);
29417 if (err)
29418 diff -urNp linux-3.0.3/drivers/net/tulip/de2104x.c linux-3.0.3/drivers/net/tulip/de2104x.c
29419 --- linux-3.0.3/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29420 +++ linux-3.0.3/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29421 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29422 struct de_srom_info_leaf *il;
29423 void *bufp;
29424
29425 + pax_track_stack();
29426 +
29427 /* download entire eeprom */
29428 for (i = 0; i < DE_EEPROM_WORDS; i++)
29429 ((__le16 *)ee_data)[i] =
29430 diff -urNp linux-3.0.3/drivers/net/tulip/de4x5.c linux-3.0.3/drivers/net/tulip/de4x5.c
29431 --- linux-3.0.3/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29432 +++ linux-3.0.3/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29433 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29434 for (i=0; i<ETH_ALEN; i++) {
29435 tmp.addr[i] = dev->dev_addr[i];
29436 }
29437 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29438 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29439 break;
29440
29441 case DE4X5_SET_HWADDR: /* Set the hardware address */
29442 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29443 spin_lock_irqsave(&lp->lock, flags);
29444 memcpy(&statbuf, &lp->pktStats, ioc->len);
29445 spin_unlock_irqrestore(&lp->lock, flags);
29446 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29447 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29448 return -EFAULT;
29449 break;
29450 }
29451 diff -urNp linux-3.0.3/drivers/net/usb/hso.c linux-3.0.3/drivers/net/usb/hso.c
29452 --- linux-3.0.3/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29453 +++ linux-3.0.3/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29454 @@ -71,7 +71,7 @@
29455 #include <asm/byteorder.h>
29456 #include <linux/serial_core.h>
29457 #include <linux/serial.h>
29458 -
29459 +#include <asm/local.h>
29460
29461 #define MOD_AUTHOR "Option Wireless"
29462 #define MOD_DESCRIPTION "USB High Speed Option driver"
29463 @@ -257,7 +257,7 @@ struct hso_serial {
29464
29465 /* from usb_serial_port */
29466 struct tty_struct *tty;
29467 - int open_count;
29468 + local_t open_count;
29469 spinlock_t serial_lock;
29470
29471 int (*write_data) (struct hso_serial *serial);
29472 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29473 struct urb *urb;
29474
29475 urb = serial->rx_urb[0];
29476 - if (serial->open_count > 0) {
29477 + if (local_read(&serial->open_count) > 0) {
29478 count = put_rxbuf_data(urb, serial);
29479 if (count == -1)
29480 return;
29481 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29482 DUMP1(urb->transfer_buffer, urb->actual_length);
29483
29484 /* Anyone listening? */
29485 - if (serial->open_count == 0)
29486 + if (local_read(&serial->open_count) == 0)
29487 return;
29488
29489 if (status == 0) {
29490 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29491 spin_unlock_irq(&serial->serial_lock);
29492
29493 /* check for port already opened, if not set the termios */
29494 - serial->open_count++;
29495 - if (serial->open_count == 1) {
29496 + if (local_inc_return(&serial->open_count) == 1) {
29497 serial->rx_state = RX_IDLE;
29498 /* Force default termio settings */
29499 _hso_serial_set_termios(tty, NULL);
29500 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29501 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29502 if (result) {
29503 hso_stop_serial_device(serial->parent);
29504 - serial->open_count--;
29505 + local_dec(&serial->open_count);
29506 kref_put(&serial->parent->ref, hso_serial_ref_free);
29507 }
29508 } else {
29509 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29510
29511 /* reset the rts and dtr */
29512 /* do the actual close */
29513 - serial->open_count--;
29514 + local_dec(&serial->open_count);
29515
29516 - if (serial->open_count <= 0) {
29517 - serial->open_count = 0;
29518 + if (local_read(&serial->open_count) <= 0) {
29519 + local_set(&serial->open_count, 0);
29520 spin_lock_irq(&serial->serial_lock);
29521 if (serial->tty == tty) {
29522 serial->tty->driver_data = NULL;
29523 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29524
29525 /* the actual setup */
29526 spin_lock_irqsave(&serial->serial_lock, flags);
29527 - if (serial->open_count)
29528 + if (local_read(&serial->open_count))
29529 _hso_serial_set_termios(tty, old);
29530 else
29531 tty->termios = old;
29532 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29533 D1("Pending read interrupt on port %d\n", i);
29534 spin_lock(&serial->serial_lock);
29535 if (serial->rx_state == RX_IDLE &&
29536 - serial->open_count > 0) {
29537 + local_read(&serial->open_count) > 0) {
29538 /* Setup and send a ctrl req read on
29539 * port i */
29540 if (!serial->rx_urb_filled[0]) {
29541 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29542 /* Start all serial ports */
29543 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29544 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29545 - if (dev2ser(serial_table[i])->open_count) {
29546 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29547 result =
29548 hso_start_serial_device(serial_table[i], GFP_NOIO);
29549 hso_kick_transmit(dev2ser(serial_table[i]));
29550 diff -urNp linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
29551 --- linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29552 +++ linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29553 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29554 * Return with error code if any of the queue indices
29555 * is out of range
29556 */
29557 - if (p->ring_index[i] < 0 ||
29558 - p->ring_index[i] >= adapter->num_rx_queues)
29559 + if (p->ring_index[i] >= adapter->num_rx_queues)
29560 return -EINVAL;
29561 }
29562
29563 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-config.h linux-3.0.3/drivers/net/vxge/vxge-config.h
29564 --- linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29565 +++ linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29566 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29567 void (*link_down)(struct __vxge_hw_device *devh);
29568 void (*crit_err)(struct __vxge_hw_device *devh,
29569 enum vxge_hw_event type, u64 ext_data);
29570 -};
29571 +} __no_const;
29572
29573 /*
29574 * struct __vxge_hw_blockpool_entry - Block private data structure
29575 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-main.c linux-3.0.3/drivers/net/vxge/vxge-main.c
29576 --- linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29577 +++ linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29578 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29579 struct sk_buff *completed[NR_SKB_COMPLETED];
29580 int more;
29581
29582 + pax_track_stack();
29583 +
29584 do {
29585 more = 0;
29586 skb_ptr = completed;
29587 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29588 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29589 int index;
29590
29591 + pax_track_stack();
29592 +
29593 /*
29594 * Filling
29595 * - itable with bucket numbers
29596 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-traffic.h linux-3.0.3/drivers/net/vxge/vxge-traffic.h
29597 --- linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29598 +++ linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29599 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29600 struct vxge_hw_mempool_dma *dma_object,
29601 u32 index,
29602 u32 is_last);
29603 -};
29604 +} __no_const;
29605
29606 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29607 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29608 diff -urNp linux-3.0.3/drivers/net/wan/cycx_x25.c linux-3.0.3/drivers/net/wan/cycx_x25.c
29609 --- linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29610 +++ linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29611 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29612 unsigned char hex[1024],
29613 * phex = hex;
29614
29615 + pax_track_stack();
29616 +
29617 if (len >= (sizeof(hex) / 2))
29618 len = (sizeof(hex) / 2) - 1;
29619
29620 diff -urNp linux-3.0.3/drivers/net/wan/hdlc_x25.c linux-3.0.3/drivers/net/wan/hdlc_x25.c
29621 --- linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29622 +++ linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29623 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29624
29625 static int x25_open(struct net_device *dev)
29626 {
29627 - struct lapb_register_struct cb;
29628 + static struct lapb_register_struct cb = {
29629 + .connect_confirmation = x25_connected,
29630 + .connect_indication = x25_connected,
29631 + .disconnect_confirmation = x25_disconnected,
29632 + .disconnect_indication = x25_disconnected,
29633 + .data_indication = x25_data_indication,
29634 + .data_transmit = x25_data_transmit
29635 + };
29636 int result;
29637
29638 - cb.connect_confirmation = x25_connected;
29639 - cb.connect_indication = x25_connected;
29640 - cb.disconnect_confirmation = x25_disconnected;
29641 - cb.disconnect_indication = x25_disconnected;
29642 - cb.data_indication = x25_data_indication;
29643 - cb.data_transmit = x25_data_transmit;
29644 -
29645 result = lapb_register(dev, &cb);
29646 if (result != LAPB_OK)
29647 return result;
29648 diff -urNp linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c
29649 --- linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29650 +++ linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29651 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29652 int do_autopm = 1;
29653 DECLARE_COMPLETION_ONSTACK(notif_completion);
29654
29655 + pax_track_stack();
29656 +
29657 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29658 i2400m, ack, ack_size);
29659 BUG_ON(_ack == i2400m->bm_ack_buf);
29660 diff -urNp linux-3.0.3/drivers/net/wireless/airo.c linux-3.0.3/drivers/net/wireless/airo.c
29661 --- linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29662 +++ linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29663 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29664 BSSListElement * loop_net;
29665 BSSListElement * tmp_net;
29666
29667 + pax_track_stack();
29668 +
29669 /* Blow away current list of scan results */
29670 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29671 list_move_tail (&loop_net->list, &ai->network_free_list);
29672 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29673 WepKeyRid wkr;
29674 int rc;
29675
29676 + pax_track_stack();
29677 +
29678 memset( &mySsid, 0, sizeof( mySsid ) );
29679 kfree (ai->flash);
29680 ai->flash = NULL;
29681 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29682 __le32 *vals = stats.vals;
29683 int len;
29684
29685 + pax_track_stack();
29686 +
29687 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29688 return -ENOMEM;
29689 data = file->private_data;
29690 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29691 /* If doLoseSync is not 1, we won't do a Lose Sync */
29692 int doLoseSync = -1;
29693
29694 + pax_track_stack();
29695 +
29696 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29697 return -ENOMEM;
29698 data = file->private_data;
29699 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29700 int i;
29701 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29702
29703 + pax_track_stack();
29704 +
29705 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29706 if (!qual)
29707 return -ENOMEM;
29708 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29709 CapabilityRid cap_rid;
29710 __le32 *vals = stats_rid.vals;
29711
29712 + pax_track_stack();
29713 +
29714 /* Get stats out of the card */
29715 clear_bit(JOB_WSTATS, &local->jobs);
29716 if (local->power.event) {
29717 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c
29718 --- linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29719 +++ linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29720 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29721 unsigned int v;
29722 u64 tsf;
29723
29724 + pax_track_stack();
29725 +
29726 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29727 len += snprintf(buf+len, sizeof(buf)-len,
29728 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29729 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29730 unsigned int len = 0;
29731 unsigned int i;
29732
29733 + pax_track_stack();
29734 +
29735 len += snprintf(buf+len, sizeof(buf)-len,
29736 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29737
29738 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29739 unsigned int i;
29740 unsigned int v;
29741
29742 + pax_track_stack();
29743 +
29744 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29745 sc->ah->ah_ant_mode);
29746 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29747 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29748 unsigned int len = 0;
29749 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29750
29751 + pax_track_stack();
29752 +
29753 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29754 sc->bssidmask);
29755 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29756 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29757 unsigned int len = 0;
29758 int i;
29759
29760 + pax_track_stack();
29761 +
29762 len += snprintf(buf+len, sizeof(buf)-len,
29763 "RX\n---------------------\n");
29764 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29765 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29766 char buf[700];
29767 unsigned int len = 0;
29768
29769 + pax_track_stack();
29770 +
29771 len += snprintf(buf+len, sizeof(buf)-len,
29772 "HW has PHY error counters:\t%s\n",
29773 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29774 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29775 struct ath5k_buf *bf, *bf0;
29776 int i, n;
29777
29778 + pax_track_stack();
29779 +
29780 len += snprintf(buf+len, sizeof(buf)-len,
29781 "available txbuffers: %d\n", sc->txbuf_len);
29782
29783 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29784 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29785 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29786 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29787 int i, im, j;
29788 int nmeasurement;
29789
29790 + pax_track_stack();
29791 +
29792 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29793 if (ah->txchainmask & (1 << i))
29794 num_chains++;
29795 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29796 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29797 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29798 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29799 int theta_low_bin = 0;
29800 int i;
29801
29802 + pax_track_stack();
29803 +
29804 /* disregard any bin that contains <= 16 samples */
29805 thresh_accum_cnt = 16;
29806 scale_factor = 5;
29807 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c
29808 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
29809 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
29810 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
29811 char buf[512];
29812 unsigned int len = 0;
29813
29814 + pax_track_stack();
29815 +
29816 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
29817 len += snprintf(buf + len, sizeof(buf) - len,
29818 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
29819 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
29820 u8 addr[ETH_ALEN];
29821 u32 tmp;
29822
29823 + pax_track_stack();
29824 +
29825 len += snprintf(buf + len, sizeof(buf) - len,
29826 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
29827 wiphy_name(sc->hw->wiphy),
29828 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
29829 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
29830 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
29831 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
29832 unsigned int len = 0;
29833 int ret = 0;
29834
29835 + pax_track_stack();
29836 +
29837 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
29838
29839 ath9k_htc_ps_wakeup(priv);
29840 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
29841 unsigned int len = 0;
29842 int ret = 0;
29843
29844 + pax_track_stack();
29845 +
29846 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
29847
29848 ath9k_htc_ps_wakeup(priv);
29849 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
29850 unsigned int len = 0;
29851 int ret = 0;
29852
29853 + pax_track_stack();
29854 +
29855 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
29856
29857 ath9k_htc_ps_wakeup(priv);
29858 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
29859 char buf[512];
29860 unsigned int len = 0;
29861
29862 + pax_track_stack();
29863 +
29864 len += snprintf(buf + len, sizeof(buf) - len,
29865 "%20s : %10u\n", "Buffers queued",
29866 priv->debug.tx_stats.buf_queued);
29867 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
29868 char buf[512];
29869 unsigned int len = 0;
29870
29871 + pax_track_stack();
29872 +
29873 spin_lock_bh(&priv->tx.tx_lock);
29874
29875 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
29876 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
29877 char buf[512];
29878 unsigned int len = 0;
29879
29880 + pax_track_stack();
29881 +
29882 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
29883 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
29884
29885 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h
29886 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
29887 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
29888 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
29889
29890 /* ANI */
29891 void (*ani_cache_ini_regs)(struct ath_hw *ah);
29892 -};
29893 +} __no_const;
29894
29895 /**
29896 * struct ath_hw_ops - callbacks used by hardware code and driver code
29897 @@ -637,7 +637,7 @@ struct ath_hw_ops {
29898 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
29899 struct ath_hw_antcomb_conf *antconf);
29900
29901 -};
29902 +} __no_const;
29903
29904 struct ath_nf_limits {
29905 s16 max;
29906 @@ -650,7 +650,7 @@ struct ath_nf_limits {
29907 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
29908
29909 struct ath_hw {
29910 - struct ath_ops reg_ops;
29911 + ath_ops_no_const reg_ops;
29912
29913 struct ieee80211_hw *hw;
29914 struct ath_common common;
29915 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath.h linux-3.0.3/drivers/net/wireless/ath/ath.h
29916 --- linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
29917 +++ linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
29918 @@ -121,6 +121,7 @@ struct ath_ops {
29919 void (*write_flush) (void *);
29920 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
29921 };
29922 +typedef struct ath_ops __no_const ath_ops_no_const;
29923
29924 struct ath_common;
29925 struct ath_bus_ops;
29926 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c
29927 --- linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
29928 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
29929 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
29930 int err;
29931 DECLARE_SSID_BUF(ssid);
29932
29933 + pax_track_stack();
29934 +
29935 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
29936
29937 if (ssid_len)
29938 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
29939 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
29940 int err;
29941
29942 + pax_track_stack();
29943 +
29944 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
29945 idx, keylen, len);
29946
29947 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c
29948 --- linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
29949 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
29950 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
29951 unsigned long flags;
29952 DECLARE_SSID_BUF(ssid);
29953
29954 + pax_track_stack();
29955 +
29956 LIBIPW_DEBUG_SCAN("'%s' (%pM"
29957 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
29958 print_ssid(ssid, info_element->data, info_element->len),
29959 diff -urNp linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c
29960 --- linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
29961 +++ linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
29962 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
29963 */
29964 if (iwl3945_mod_params.disable_hw_scan) {
29965 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
29966 - iwl3945_hw_ops.hw_scan = NULL;
29967 + pax_open_kernel();
29968 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
29969 + pax_close_kernel();
29970 }
29971
29972 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
29973 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
29974 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
29975 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
29976 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
29977 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
29978 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
29979
29980 + pax_track_stack();
29981 +
29982 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
29983
29984 /* Treat uninitialized rate scaling data same as non-existing. */
29985 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
29986 container_of(lq_sta, struct iwl_station_priv, lq_sta);
29987 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
29988
29989 + pax_track_stack();
29990 +
29991 /* Override starting rate (index 0) if needed for debug purposes */
29992 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
29993
29994 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
29995 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
29996 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
29997 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
29998 int pos = 0;
29999 const size_t bufsz = sizeof(buf);
30000
30001 + pax_track_stack();
30002 +
30003 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30004 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30005 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30006 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30007 char buf[256 * NUM_IWL_RXON_CTX];
30008 const size_t bufsz = sizeof(buf);
30009
30010 + pax_track_stack();
30011 +
30012 for_each_context(priv, ctx) {
30013 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30014 ctx->ctxid);
30015 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h
30016 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30017 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30018 @@ -68,8 +68,8 @@ do {
30019 } while (0)
30020
30021 #else
30022 -#define IWL_DEBUG(__priv, level, fmt, args...)
30023 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30024 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30025 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30026 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30027 const void *p, u32 len)
30028 {}
30029 diff -urNp linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
30030 --- linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30031 +++ linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30032 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30033 int buf_len = 512;
30034 size_t len = 0;
30035
30036 + pax_track_stack();
30037 +
30038 if (*ppos != 0)
30039 return 0;
30040 if (count < sizeof(buf))
30041 diff -urNp linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c
30042 --- linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30043 +++ linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30044 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30045 return -EINVAL;
30046
30047 if (fake_hw_scan) {
30048 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30049 - mac80211_hwsim_ops.sw_scan_start = NULL;
30050 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30051 + pax_open_kernel();
30052 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30053 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30054 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30055 + pax_close_kernel();
30056 }
30057
30058 spin_lock_init(&hwsim_radio_lock);
30059 diff -urNp linux-3.0.3/drivers/net/wireless/rndis_wlan.c linux-3.0.3/drivers/net/wireless/rndis_wlan.c
30060 --- linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30061 +++ linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30062 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30063
30064 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30065
30066 - if (rts_threshold < 0 || rts_threshold > 2347)
30067 + if (rts_threshold > 2347)
30068 rts_threshold = 2347;
30069
30070 tmp = cpu_to_le32(rts_threshold);
30071 diff -urNp linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30072 --- linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30073 +++ linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30074 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30075 u8 rfpath;
30076 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30077
30078 + pax_track_stack();
30079 +
30080 precommoncmdcnt = 0;
30081 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30082 MAX_PRECMD_CNT,
30083 diff -urNp linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h
30084 --- linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30085 +++ linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30086 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30087 void (*reset)(struct wl1251 *wl);
30088 void (*enable_irq)(struct wl1251 *wl);
30089 void (*disable_irq)(struct wl1251 *wl);
30090 -};
30091 +} __no_const;
30092
30093 struct wl1251 {
30094 struct ieee80211_hw *hw;
30095 diff -urNp linux-3.0.3/drivers/net/wireless/wl12xx/spi.c linux-3.0.3/drivers/net/wireless/wl12xx/spi.c
30096 --- linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30097 +++ linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30098 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30099 u32 chunk_len;
30100 int i;
30101
30102 + pax_track_stack();
30103 +
30104 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30105
30106 spi_message_init(&m);
30107 diff -urNp linux-3.0.3/drivers/oprofile/buffer_sync.c linux-3.0.3/drivers/oprofile/buffer_sync.c
30108 --- linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30109 +++ linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30110 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30111 if (cookie == NO_COOKIE)
30112 offset = pc;
30113 if (cookie == INVALID_COOKIE) {
30114 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30115 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30116 offset = pc;
30117 }
30118 if (cookie != last_cookie) {
30119 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30120 /* add userspace sample */
30121
30122 if (!mm) {
30123 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30124 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30125 return 0;
30126 }
30127
30128 cookie = lookup_dcookie(mm, s->eip, &offset);
30129
30130 if (cookie == INVALID_COOKIE) {
30131 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30132 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30133 return 0;
30134 }
30135
30136 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30137 /* ignore backtraces if failed to add a sample */
30138 if (state == sb_bt_start) {
30139 state = sb_bt_ignore;
30140 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30141 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30142 }
30143 }
30144 release_mm(mm);
30145 diff -urNp linux-3.0.3/drivers/oprofile/event_buffer.c linux-3.0.3/drivers/oprofile/event_buffer.c
30146 --- linux-3.0.3/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30147 +++ linux-3.0.3/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30148 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30149 }
30150
30151 if (buffer_pos == buffer_size) {
30152 - atomic_inc(&oprofile_stats.event_lost_overflow);
30153 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30154 return;
30155 }
30156
30157 diff -urNp linux-3.0.3/drivers/oprofile/oprof.c linux-3.0.3/drivers/oprofile/oprof.c
30158 --- linux-3.0.3/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30159 +++ linux-3.0.3/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30160 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30161 if (oprofile_ops.switch_events())
30162 return;
30163
30164 - atomic_inc(&oprofile_stats.multiplex_counter);
30165 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30166 start_switch_worker();
30167 }
30168
30169 diff -urNp linux-3.0.3/drivers/oprofile/oprofilefs.c linux-3.0.3/drivers/oprofile/oprofilefs.c
30170 --- linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30171 +++ linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30172 @@ -186,7 +186,7 @@ static const struct file_operations atom
30173
30174
30175 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30176 - char const *name, atomic_t *val)
30177 + char const *name, atomic_unchecked_t *val)
30178 {
30179 return __oprofilefs_create_file(sb, root, name,
30180 &atomic_ro_fops, 0444, val);
30181 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.c linux-3.0.3/drivers/oprofile/oprofile_stats.c
30182 --- linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30183 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30184 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30185 cpu_buf->sample_invalid_eip = 0;
30186 }
30187
30188 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30189 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30190 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30191 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30192 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30193 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30194 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30195 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30196 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30197 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30198 }
30199
30200
30201 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.h linux-3.0.3/drivers/oprofile/oprofile_stats.h
30202 --- linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30203 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30204 @@ -13,11 +13,11 @@
30205 #include <asm/atomic.h>
30206
30207 struct oprofile_stat_struct {
30208 - atomic_t sample_lost_no_mm;
30209 - atomic_t sample_lost_no_mapping;
30210 - atomic_t bt_lost_no_mapping;
30211 - atomic_t event_lost_overflow;
30212 - atomic_t multiplex_counter;
30213 + atomic_unchecked_t sample_lost_no_mm;
30214 + atomic_unchecked_t sample_lost_no_mapping;
30215 + atomic_unchecked_t bt_lost_no_mapping;
30216 + atomic_unchecked_t event_lost_overflow;
30217 + atomic_unchecked_t multiplex_counter;
30218 };
30219
30220 extern struct oprofile_stat_struct oprofile_stats;
30221 diff -urNp linux-3.0.3/drivers/parport/procfs.c linux-3.0.3/drivers/parport/procfs.c
30222 --- linux-3.0.3/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30223 +++ linux-3.0.3/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30224 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30225
30226 *ppos += len;
30227
30228 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30229 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30230 }
30231
30232 #ifdef CONFIG_PARPORT_1284
30233 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30234
30235 *ppos += len;
30236
30237 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30238 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30239 }
30240 #endif /* IEEE1284.3 support. */
30241
30242 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h
30243 --- linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30244 +++ linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30245 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30246 int (*hardware_test) (struct slot* slot, u32 value);
30247 u8 (*get_power) (struct slot* slot);
30248 int (*set_power) (struct slot* slot, int value);
30249 -};
30250 +} __no_const;
30251
30252 struct cpci_hp_controller {
30253 unsigned int irq;
30254 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c
30255 --- linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30256 +++ linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30257 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30258
30259 void compaq_nvram_init (void __iomem *rom_start)
30260 {
30261 +
30262 +#ifndef CONFIG_PAX_KERNEXEC
30263 if (rom_start) {
30264 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30265 }
30266 +#endif
30267 +
30268 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30269
30270 /* initialize our int15 lock */
30271 diff -urNp linux-3.0.3/drivers/pci/pcie/aspm.c linux-3.0.3/drivers/pci/pcie/aspm.c
30272 --- linux-3.0.3/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30273 +++ linux-3.0.3/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30274 @@ -27,9 +27,9 @@
30275 #define MODULE_PARAM_PREFIX "pcie_aspm."
30276
30277 /* Note: those are not register definitions */
30278 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30279 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30280 -#define ASPM_STATE_L1 (4) /* L1 state */
30281 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30282 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30283 +#define ASPM_STATE_L1 (4U) /* L1 state */
30284 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30285 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30286
30287 diff -urNp linux-3.0.3/drivers/pci/probe.c linux-3.0.3/drivers/pci/probe.c
30288 --- linux-3.0.3/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30289 +++ linux-3.0.3/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30290 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30291 u32 l, sz, mask;
30292 u16 orig_cmd;
30293
30294 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30295 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30296
30297 if (!dev->mmio_always_on) {
30298 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30299 diff -urNp linux-3.0.3/drivers/pci/proc.c linux-3.0.3/drivers/pci/proc.c
30300 --- linux-3.0.3/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30301 +++ linux-3.0.3/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30302 @@ -476,7 +476,16 @@ static const struct file_operations proc
30303 static int __init pci_proc_init(void)
30304 {
30305 struct pci_dev *dev = NULL;
30306 +
30307 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30308 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30309 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30310 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30311 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30312 +#endif
30313 +#else
30314 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30315 +#endif
30316 proc_create("devices", 0, proc_bus_pci_dir,
30317 &proc_bus_pci_dev_operations);
30318 proc_initialized = 1;
30319 diff -urNp linux-3.0.3/drivers/pci/xen-pcifront.c linux-3.0.3/drivers/pci/xen-pcifront.c
30320 --- linux-3.0.3/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30321 +++ linux-3.0.3/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30322 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30323 struct pcifront_sd *sd = bus->sysdata;
30324 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30325
30326 + pax_track_stack();
30327 +
30328 if (verbose_request)
30329 dev_info(&pdev->xdev->dev,
30330 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30331 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30332 struct pcifront_sd *sd = bus->sysdata;
30333 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30334
30335 + pax_track_stack();
30336 +
30337 if (verbose_request)
30338 dev_info(&pdev->xdev->dev,
30339 "write dev=%04x:%02x:%02x.%01x - "
30340 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30341 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30342 struct msi_desc *entry;
30343
30344 + pax_track_stack();
30345 +
30346 if (nvec > SH_INFO_MAX_VEC) {
30347 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30348 " Increase SH_INFO_MAX_VEC.\n", nvec);
30349 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30350 struct pcifront_sd *sd = dev->bus->sysdata;
30351 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30352
30353 + pax_track_stack();
30354 +
30355 err = do_pci_op(pdev, &op);
30356
30357 /* What should do for error ? */
30358 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30359 struct pcifront_sd *sd = dev->bus->sysdata;
30360 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30361
30362 + pax_track_stack();
30363 +
30364 err = do_pci_op(pdev, &op);
30365 if (likely(!err)) {
30366 vector[0] = op.value;
30367 diff -urNp linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c
30368 --- linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30369 +++ linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30370 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30371 return 0;
30372 }
30373
30374 -void static hotkey_mask_warn_incomplete_mask(void)
30375 +static void hotkey_mask_warn_incomplete_mask(void)
30376 {
30377 /* log only what the user can fix... */
30378 const u32 wantedmask = hotkey_driver_mask &
30379 diff -urNp linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c
30380 --- linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30381 +++ linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30382 @@ -59,7 +59,7 @@ do { \
30383 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30384 } while(0)
30385
30386 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30387 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30388 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30389
30390 /*
30391 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30392
30393 cpu = get_cpu();
30394 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30395 +
30396 + pax_open_kernel();
30397 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30398 + pax_close_kernel();
30399
30400 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30401 spin_lock_irqsave(&pnp_bios_lock, flags);
30402 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30403 :"memory");
30404 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30405
30406 + pax_open_kernel();
30407 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30408 + pax_close_kernel();
30409 +
30410 put_cpu();
30411
30412 /* If we get here and this is set then the PnP BIOS faulted on us. */
30413 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30414 return status;
30415 }
30416
30417 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30418 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30419 {
30420 int i;
30421
30422 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30423 pnp_bios_callpoint.offset = header->fields.pm16offset;
30424 pnp_bios_callpoint.segment = PNP_CS16;
30425
30426 + pax_open_kernel();
30427 +
30428 for_each_possible_cpu(i) {
30429 struct desc_struct *gdt = get_cpu_gdt_table(i);
30430 if (!gdt)
30431 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30432 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30433 (unsigned long)__va(header->fields.pm16dseg));
30434 }
30435 +
30436 + pax_close_kernel();
30437 }
30438 diff -urNp linux-3.0.3/drivers/pnp/resource.c linux-3.0.3/drivers/pnp/resource.c
30439 --- linux-3.0.3/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30440 +++ linux-3.0.3/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30441 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30442 return 1;
30443
30444 /* check if the resource is valid */
30445 - if (*irq < 0 || *irq > 15)
30446 + if (*irq > 15)
30447 return 0;
30448
30449 /* check if the resource is reserved */
30450 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30451 return 1;
30452
30453 /* check if the resource is valid */
30454 - if (*dma < 0 || *dma == 4 || *dma > 7)
30455 + if (*dma == 4 || *dma > 7)
30456 return 0;
30457
30458 /* check if the resource is reserved */
30459 diff -urNp linux-3.0.3/drivers/power/bq27x00_battery.c linux-3.0.3/drivers/power/bq27x00_battery.c
30460 --- linux-3.0.3/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30461 +++ linux-3.0.3/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30462 @@ -67,7 +67,7 @@
30463 struct bq27x00_device_info;
30464 struct bq27x00_access_methods {
30465 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30466 -};
30467 +} __no_const;
30468
30469 enum bq27x00_chip { BQ27000, BQ27500 };
30470
30471 diff -urNp linux-3.0.3/drivers/regulator/max8660.c linux-3.0.3/drivers/regulator/max8660.c
30472 --- linux-3.0.3/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30473 +++ linux-3.0.3/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30474 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30475 max8660->shadow_regs[MAX8660_OVER1] = 5;
30476 } else {
30477 /* Otherwise devices can be toggled via software */
30478 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30479 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30480 + pax_open_kernel();
30481 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30482 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30483 + pax_close_kernel();
30484 }
30485
30486 /*
30487 diff -urNp linux-3.0.3/drivers/regulator/mc13892-regulator.c linux-3.0.3/drivers/regulator/mc13892-regulator.c
30488 --- linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30489 +++ linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30490 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30491 }
30492 mc13xxx_unlock(mc13892);
30493
30494 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30495 + pax_open_kernel();
30496 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30497 = mc13892_vcam_set_mode;
30498 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30499 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30500 = mc13892_vcam_get_mode;
30501 + pax_close_kernel();
30502 for (i = 0; i < pdata->num_regulators; i++) {
30503 init_data = &pdata->regulators[i];
30504 priv->regulators[i] = regulator_register(
30505 diff -urNp linux-3.0.3/drivers/rtc/rtc-dev.c linux-3.0.3/drivers/rtc/rtc-dev.c
30506 --- linux-3.0.3/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30507 +++ linux-3.0.3/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30508 @@ -14,6 +14,7 @@
30509 #include <linux/module.h>
30510 #include <linux/rtc.h>
30511 #include <linux/sched.h>
30512 +#include <linux/grsecurity.h>
30513 #include "rtc-core.h"
30514
30515 static dev_t rtc_devt;
30516 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30517 if (copy_from_user(&tm, uarg, sizeof(tm)))
30518 return -EFAULT;
30519
30520 + gr_log_timechange();
30521 +
30522 return rtc_set_time(rtc, &tm);
30523
30524 case RTC_PIE_ON:
30525 diff -urNp linux-3.0.3/drivers/scsi/aacraid/aacraid.h linux-3.0.3/drivers/scsi/aacraid/aacraid.h
30526 --- linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30527 +++ linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30528 @@ -492,7 +492,7 @@ struct adapter_ops
30529 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30530 /* Administrative operations */
30531 int (*adapter_comm)(struct aac_dev * dev, int comm);
30532 -};
30533 +} __no_const;
30534
30535 /*
30536 * Define which interrupt handler needs to be installed
30537 diff -urNp linux-3.0.3/drivers/scsi/aacraid/commctrl.c linux-3.0.3/drivers/scsi/aacraid/commctrl.c
30538 --- linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30539 +++ linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30540 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30541 u32 actual_fibsize64, actual_fibsize = 0;
30542 int i;
30543
30544 + pax_track_stack();
30545
30546 if (dev->in_reset) {
30547 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30548 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfad.c linux-3.0.3/drivers/scsi/bfa/bfad.c
30549 --- linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30550 +++ linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30551 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30552 struct bfad_vport_s *vport, *vport_new;
30553 struct bfa_fcs_driver_info_s driver_info;
30554
30555 + pax_track_stack();
30556 +
30557 /* Fill the driver_info info to fcs*/
30558 memset(&driver_info, 0, sizeof(driver_info));
30559 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30560 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c
30561 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30562 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30563 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30564 u16 len, count;
30565 u16 templen;
30566
30567 + pax_track_stack();
30568 +
30569 /*
30570 * get hba attributes
30571 */
30572 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30573 u8 count = 0;
30574 u16 templen;
30575
30576 + pax_track_stack();
30577 +
30578 /*
30579 * get port attributes
30580 */
30581 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c
30582 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30583 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30584 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30585 struct fc_rpsc_speed_info_s speeds;
30586 struct bfa_port_attr_s pport_attr;
30587
30588 + pax_track_stack();
30589 +
30590 bfa_trc(port->fcs, rx_fchs->s_id);
30591 bfa_trc(port->fcs, rx_fchs->d_id);
30592
30593 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa.h linux-3.0.3/drivers/scsi/bfa/bfa.h
30594 --- linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30595 +++ linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30596 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30597 u32 *nvecs, u32 *maxvec);
30598 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30599 u32 *end);
30600 -};
30601 +} __no_const;
30602 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30603
30604 struct bfa_iocfc_s {
30605 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h
30606 --- linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30607 +++ linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30608 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30609 bfa_ioc_disable_cbfn_t disable_cbfn;
30610 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30611 bfa_ioc_reset_cbfn_t reset_cbfn;
30612 -};
30613 +} __no_const;
30614
30615 /*
30616 * Heartbeat failure notification queue element.
30617 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30618 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30619 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30620 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30621 -};
30622 +} __no_const;
30623
30624 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30625 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30626 diff -urNp linux-3.0.3/drivers/scsi/BusLogic.c linux-3.0.3/drivers/scsi/BusLogic.c
30627 --- linux-3.0.3/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30628 +++ linux-3.0.3/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30629 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30630 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30631 *PrototypeHostAdapter)
30632 {
30633 + pax_track_stack();
30634 +
30635 /*
30636 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30637 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30638 diff -urNp linux-3.0.3/drivers/scsi/dpt_i2o.c linux-3.0.3/drivers/scsi/dpt_i2o.c
30639 --- linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30640 +++ linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30641 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30642 dma_addr_t addr;
30643 ulong flags = 0;
30644
30645 + pax_track_stack();
30646 +
30647 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30648 // get user msg size in u32s
30649 if(get_user(size, &user_msg[0])){
30650 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30651 s32 rcode;
30652 dma_addr_t addr;
30653
30654 + pax_track_stack();
30655 +
30656 memset(msg, 0 , sizeof(msg));
30657 len = scsi_bufflen(cmd);
30658 direction = 0x00000000;
30659 diff -urNp linux-3.0.3/drivers/scsi/eata.c linux-3.0.3/drivers/scsi/eata.c
30660 --- linux-3.0.3/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30661 +++ linux-3.0.3/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30662 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30663 struct hostdata *ha;
30664 char name[16];
30665
30666 + pax_track_stack();
30667 +
30668 sprintf(name, "%s%d", driver_name, j);
30669
30670 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30671 diff -urNp linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c
30672 --- linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30673 +++ linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30674 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30675 } buf;
30676 int rc;
30677
30678 + pax_track_stack();
30679 +
30680 fiph = (struct fip_header *)skb->data;
30681 sub = fiph->fip_subcode;
30682
30683 diff -urNp linux-3.0.3/drivers/scsi/gdth.c linux-3.0.3/drivers/scsi/gdth.c
30684 --- linux-3.0.3/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30685 +++ linux-3.0.3/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30686 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30687 unsigned long flags;
30688 gdth_ha_str *ha;
30689
30690 + pax_track_stack();
30691 +
30692 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30693 return -EFAULT;
30694 ha = gdth_find_ha(ldrv.ionode);
30695 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30696 gdth_ha_str *ha;
30697 int rval;
30698
30699 + pax_track_stack();
30700 +
30701 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30702 res.number >= MAX_HDRIVES)
30703 return -EFAULT;
30704 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30705 gdth_ha_str *ha;
30706 int rval;
30707
30708 + pax_track_stack();
30709 +
30710 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30711 return -EFAULT;
30712 ha = gdth_find_ha(gen.ionode);
30713 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30714 int i;
30715 gdth_cmd_str gdtcmd;
30716 char cmnd[MAX_COMMAND_SIZE];
30717 +
30718 + pax_track_stack();
30719 +
30720 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30721
30722 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30723 diff -urNp linux-3.0.3/drivers/scsi/gdth_proc.c linux-3.0.3/drivers/scsi/gdth_proc.c
30724 --- linux-3.0.3/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30725 +++ linux-3.0.3/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30726 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30727 u64 paddr;
30728
30729 char cmnd[MAX_COMMAND_SIZE];
30730 +
30731 + pax_track_stack();
30732 +
30733 memset(cmnd, 0xff, 12);
30734 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30735
30736 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30737 gdth_hget_str *phg;
30738 char cmnd[MAX_COMMAND_SIZE];
30739
30740 + pax_track_stack();
30741 +
30742 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30743 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30744 if (!gdtcmd || !estr)
30745 diff -urNp linux-3.0.3/drivers/scsi/hosts.c linux-3.0.3/drivers/scsi/hosts.c
30746 --- linux-3.0.3/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30747 +++ linux-3.0.3/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30748 @@ -42,7 +42,7 @@
30749 #include "scsi_logging.h"
30750
30751
30752 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
30753 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30754
30755
30756 static void scsi_host_cls_release(struct device *dev)
30757 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30758 * subtract one because we increment first then return, but we need to
30759 * know what the next host number was before increment
30760 */
30761 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30762 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30763 shost->dma_channel = 0xff;
30764
30765 /* These three are default values which can be overridden */
30766 diff -urNp linux-3.0.3/drivers/scsi/hpsa.c linux-3.0.3/drivers/scsi/hpsa.c
30767 --- linux-3.0.3/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30768 +++ linux-3.0.3/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30769 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30770 u32 a;
30771
30772 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30773 - return h->access.command_completed(h);
30774 + return h->access->command_completed(h);
30775
30776 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30777 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30778 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30779 while (!list_empty(&h->reqQ)) {
30780 c = list_entry(h->reqQ.next, struct CommandList, list);
30781 /* can't do anything if fifo is full */
30782 - if ((h->access.fifo_full(h))) {
30783 + if ((h->access->fifo_full(h))) {
30784 dev_warn(&h->pdev->dev, "fifo full\n");
30785 break;
30786 }
30787 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30788 h->Qdepth--;
30789
30790 /* Tell the controller execute command */
30791 - h->access.submit_command(h, c);
30792 + h->access->submit_command(h, c);
30793
30794 /* Put job onto the completed Q */
30795 addQ(&h->cmpQ, c);
30796 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30797
30798 static inline unsigned long get_next_completion(struct ctlr_info *h)
30799 {
30800 - return h->access.command_completed(h);
30801 + return h->access->command_completed(h);
30802 }
30803
30804 static inline bool interrupt_pending(struct ctlr_info *h)
30805 {
30806 - return h->access.intr_pending(h);
30807 + return h->access->intr_pending(h);
30808 }
30809
30810 static inline long interrupt_not_for_us(struct ctlr_info *h)
30811 {
30812 - return (h->access.intr_pending(h) == 0) ||
30813 + return (h->access->intr_pending(h) == 0) ||
30814 (h->interrupts_enabled == 0);
30815 }
30816
30817 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
30818 if (prod_index < 0)
30819 return -ENODEV;
30820 h->product_name = products[prod_index].product_name;
30821 - h->access = *(products[prod_index].access);
30822 + h->access = products[prod_index].access;
30823
30824 if (hpsa_board_disabled(h->pdev)) {
30825 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
30826 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
30827 }
30828
30829 /* make sure the board interrupts are off */
30830 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
30831 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
30832
30833 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
30834 goto clean2;
30835 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
30836 * fake ones to scoop up any residual completions.
30837 */
30838 spin_lock_irqsave(&h->lock, flags);
30839 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
30840 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
30841 spin_unlock_irqrestore(&h->lock, flags);
30842 free_irq(h->intr[h->intr_mode], h);
30843 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
30844 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
30845 dev_info(&h->pdev->dev, "Board READY.\n");
30846 dev_info(&h->pdev->dev,
30847 "Waiting for stale completions to drain.\n");
30848 - h->access.set_intr_mask(h, HPSA_INTR_ON);
30849 + h->access->set_intr_mask(h, HPSA_INTR_ON);
30850 msleep(10000);
30851 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
30852 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
30853
30854 rc = controller_reset_failed(h->cfgtable);
30855 if (rc)
30856 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
30857 }
30858
30859 /* Turn the interrupts on so we can service requests */
30860 - h->access.set_intr_mask(h, HPSA_INTR_ON);
30861 + h->access->set_intr_mask(h, HPSA_INTR_ON);
30862
30863 hpsa_hba_inquiry(h);
30864 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
30865 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
30866 * To write all data in the battery backed cache to disks
30867 */
30868 hpsa_flush_cache(h);
30869 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
30870 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
30871 free_irq(h->intr[h->intr_mode], h);
30872 #ifdef CONFIG_PCI_MSI
30873 if (h->msix_vector)
30874 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
30875 return;
30876 }
30877 /* Change the access methods to the performant access methods */
30878 - h->access = SA5_performant_access;
30879 + h->access = &SA5_performant_access;
30880 h->transMethod = CFGTBL_Trans_Performant;
30881 }
30882
30883 diff -urNp linux-3.0.3/drivers/scsi/hpsa.h linux-3.0.3/drivers/scsi/hpsa.h
30884 --- linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
30885 +++ linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
30886 @@ -73,7 +73,7 @@ struct ctlr_info {
30887 unsigned int msix_vector;
30888 unsigned int msi_vector;
30889 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
30890 - struct access_method access;
30891 + struct access_method *access;
30892
30893 /* queue and queue Info */
30894 struct list_head reqQ;
30895 diff -urNp linux-3.0.3/drivers/scsi/ips.h linux-3.0.3/drivers/scsi/ips.h
30896 --- linux-3.0.3/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
30897 +++ linux-3.0.3/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
30898 @@ -1027,7 +1027,7 @@ typedef struct {
30899 int (*intr)(struct ips_ha *);
30900 void (*enableint)(struct ips_ha *);
30901 uint32_t (*statupd)(struct ips_ha *);
30902 -} ips_hw_func_t;
30903 +} __no_const ips_hw_func_t;
30904
30905 typedef struct ips_ha {
30906 uint8_t ha_id[IPS_MAX_CHANNELS+1];
30907 diff -urNp linux-3.0.3/drivers/scsi/libfc/fc_exch.c linux-3.0.3/drivers/scsi/libfc/fc_exch.c
30908 --- linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
30909 +++ linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
30910 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
30911 * all together if not used XXX
30912 */
30913 struct {
30914 - atomic_t no_free_exch;
30915 - atomic_t no_free_exch_xid;
30916 - atomic_t xid_not_found;
30917 - atomic_t xid_busy;
30918 - atomic_t seq_not_found;
30919 - atomic_t non_bls_resp;
30920 + atomic_unchecked_t no_free_exch;
30921 + atomic_unchecked_t no_free_exch_xid;
30922 + atomic_unchecked_t xid_not_found;
30923 + atomic_unchecked_t xid_busy;
30924 + atomic_unchecked_t seq_not_found;
30925 + atomic_unchecked_t non_bls_resp;
30926 } stats;
30927 };
30928
30929 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
30930 /* allocate memory for exchange */
30931 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
30932 if (!ep) {
30933 - atomic_inc(&mp->stats.no_free_exch);
30934 + atomic_inc_unchecked(&mp->stats.no_free_exch);
30935 goto out;
30936 }
30937 memset(ep, 0, sizeof(*ep));
30938 @@ -761,7 +761,7 @@ out:
30939 return ep;
30940 err:
30941 spin_unlock_bh(&pool->lock);
30942 - atomic_inc(&mp->stats.no_free_exch_xid);
30943 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
30944 mempool_free(ep, mp->ep_pool);
30945 return NULL;
30946 }
30947 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
30948 xid = ntohs(fh->fh_ox_id); /* we originated exch */
30949 ep = fc_exch_find(mp, xid);
30950 if (!ep) {
30951 - atomic_inc(&mp->stats.xid_not_found);
30952 + atomic_inc_unchecked(&mp->stats.xid_not_found);
30953 reject = FC_RJT_OX_ID;
30954 goto out;
30955 }
30956 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
30957 ep = fc_exch_find(mp, xid);
30958 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
30959 if (ep) {
30960 - atomic_inc(&mp->stats.xid_busy);
30961 + atomic_inc_unchecked(&mp->stats.xid_busy);
30962 reject = FC_RJT_RX_ID;
30963 goto rel;
30964 }
30965 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
30966 }
30967 xid = ep->xid; /* get our XID */
30968 } else if (!ep) {
30969 - atomic_inc(&mp->stats.xid_not_found);
30970 + atomic_inc_unchecked(&mp->stats.xid_not_found);
30971 reject = FC_RJT_RX_ID; /* XID not found */
30972 goto out;
30973 }
30974 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
30975 } else {
30976 sp = &ep->seq;
30977 if (sp->id != fh->fh_seq_id) {
30978 - atomic_inc(&mp->stats.seq_not_found);
30979 + atomic_inc_unchecked(&mp->stats.seq_not_found);
30980 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
30981 goto rel;
30982 }
30983 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
30984
30985 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
30986 if (!ep) {
30987 - atomic_inc(&mp->stats.xid_not_found);
30988 + atomic_inc_unchecked(&mp->stats.xid_not_found);
30989 goto out;
30990 }
30991 if (ep->esb_stat & ESB_ST_COMPLETE) {
30992 - atomic_inc(&mp->stats.xid_not_found);
30993 + atomic_inc_unchecked(&mp->stats.xid_not_found);
30994 goto rel;
30995 }
30996 if (ep->rxid == FC_XID_UNKNOWN)
30997 ep->rxid = ntohs(fh->fh_rx_id);
30998 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
30999 - atomic_inc(&mp->stats.xid_not_found);
31000 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31001 goto rel;
31002 }
31003 if (ep->did != ntoh24(fh->fh_s_id) &&
31004 ep->did != FC_FID_FLOGI) {
31005 - atomic_inc(&mp->stats.xid_not_found);
31006 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31007 goto rel;
31008 }
31009 sof = fr_sof(fp);
31010 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31011 sp->ssb_stat |= SSB_ST_RESP;
31012 sp->id = fh->fh_seq_id;
31013 } else if (sp->id != fh->fh_seq_id) {
31014 - atomic_inc(&mp->stats.seq_not_found);
31015 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31016 goto rel;
31017 }
31018
31019 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31020 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31021
31022 if (!sp)
31023 - atomic_inc(&mp->stats.xid_not_found);
31024 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31025 else
31026 - atomic_inc(&mp->stats.non_bls_resp);
31027 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31028
31029 fc_frame_free(fp);
31030 }
31031 diff -urNp linux-3.0.3/drivers/scsi/libsas/sas_ata.c linux-3.0.3/drivers/scsi/libsas/sas_ata.c
31032 --- linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31033 +++ linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31034 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31035 .postreset = ata_std_postreset,
31036 .error_handler = ata_std_error_handler,
31037 .post_internal_cmd = sas_ata_post_internal,
31038 - .qc_defer = ata_std_qc_defer,
31039 + .qc_defer = ata_std_qc_defer,
31040 .qc_prep = ata_noop_qc_prep,
31041 .qc_issue = sas_ata_qc_issue,
31042 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31043 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c
31044 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31045 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31046 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31047
31048 #include <linux/debugfs.h>
31049
31050 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31051 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31052 static unsigned long lpfc_debugfs_start_time = 0L;
31053
31054 /* iDiag */
31055 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31056 lpfc_debugfs_enable = 0;
31057
31058 len = 0;
31059 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31060 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31061 (lpfc_debugfs_max_disc_trc - 1);
31062 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31063 dtp = vport->disc_trc + i;
31064 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31065 lpfc_debugfs_enable = 0;
31066
31067 len = 0;
31068 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31069 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31070 (lpfc_debugfs_max_slow_ring_trc - 1);
31071 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31072 dtp = phba->slow_ring_trc + i;
31073 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31074 uint32_t *ptr;
31075 char buffer[1024];
31076
31077 + pax_track_stack();
31078 +
31079 off = 0;
31080 spin_lock_irq(&phba->hbalock);
31081
31082 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31083 !vport || !vport->disc_trc)
31084 return;
31085
31086 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31087 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31088 (lpfc_debugfs_max_disc_trc - 1);
31089 dtp = vport->disc_trc + index;
31090 dtp->fmt = fmt;
31091 dtp->data1 = data1;
31092 dtp->data2 = data2;
31093 dtp->data3 = data3;
31094 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31095 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31096 dtp->jif = jiffies;
31097 #endif
31098 return;
31099 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31100 !phba || !phba->slow_ring_trc)
31101 return;
31102
31103 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31104 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31105 (lpfc_debugfs_max_slow_ring_trc - 1);
31106 dtp = phba->slow_ring_trc + index;
31107 dtp->fmt = fmt;
31108 dtp->data1 = data1;
31109 dtp->data2 = data2;
31110 dtp->data3 = data3;
31111 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31112 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31113 dtp->jif = jiffies;
31114 #endif
31115 return;
31116 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31117 "slow_ring buffer\n");
31118 goto debug_failed;
31119 }
31120 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31121 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31122 memset(phba->slow_ring_trc, 0,
31123 (sizeof(struct lpfc_debugfs_trc) *
31124 lpfc_debugfs_max_slow_ring_trc));
31125 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31126 "buffer\n");
31127 goto debug_failed;
31128 }
31129 - atomic_set(&vport->disc_trc_cnt, 0);
31130 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31131
31132 snprintf(name, sizeof(name), "discovery_trace");
31133 vport->debug_disc_trc =
31134 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc.h linux-3.0.3/drivers/scsi/lpfc/lpfc.h
31135 --- linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31136 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31137 @@ -420,7 +420,7 @@ struct lpfc_vport {
31138 struct dentry *debug_nodelist;
31139 struct dentry *vport_debugfs_root;
31140 struct lpfc_debugfs_trc *disc_trc;
31141 - atomic_t disc_trc_cnt;
31142 + atomic_unchecked_t disc_trc_cnt;
31143 #endif
31144 uint8_t stat_data_enabled;
31145 uint8_t stat_data_blocked;
31146 @@ -826,8 +826,8 @@ struct lpfc_hba {
31147 struct timer_list fabric_block_timer;
31148 unsigned long bit_flags;
31149 #define FABRIC_COMANDS_BLOCKED 0
31150 - atomic_t num_rsrc_err;
31151 - atomic_t num_cmd_success;
31152 + atomic_unchecked_t num_rsrc_err;
31153 + atomic_unchecked_t num_cmd_success;
31154 unsigned long last_rsrc_error_time;
31155 unsigned long last_ramp_down_time;
31156 unsigned long last_ramp_up_time;
31157 @@ -841,7 +841,7 @@ struct lpfc_hba {
31158 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31159 struct dentry *debug_slow_ring_trc;
31160 struct lpfc_debugfs_trc *slow_ring_trc;
31161 - atomic_t slow_ring_trc_cnt;
31162 + atomic_unchecked_t slow_ring_trc_cnt;
31163 /* iDiag debugfs sub-directory */
31164 struct dentry *idiag_root;
31165 struct dentry *idiag_pci_cfg;
31166 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c
31167 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31168 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31169 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31170 printk(LPFC_COPYRIGHT "\n");
31171
31172 if (lpfc_enable_npiv) {
31173 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31174 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31175 + pax_open_kernel();
31176 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31177 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31178 + pax_close_kernel();
31179 }
31180 lpfc_transport_template =
31181 fc_attach_transport(&lpfc_transport_functions);
31182 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c
31183 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31184 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31185 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31186 uint32_t evt_posted;
31187
31188 spin_lock_irqsave(&phba->hbalock, flags);
31189 - atomic_inc(&phba->num_rsrc_err);
31190 + atomic_inc_unchecked(&phba->num_rsrc_err);
31191 phba->last_rsrc_error_time = jiffies;
31192
31193 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31194 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31195 unsigned long flags;
31196 struct lpfc_hba *phba = vport->phba;
31197 uint32_t evt_posted;
31198 - atomic_inc(&phba->num_cmd_success);
31199 + atomic_inc_unchecked(&phba->num_cmd_success);
31200
31201 if (vport->cfg_lun_queue_depth <= queue_depth)
31202 return;
31203 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31204 unsigned long num_rsrc_err, num_cmd_success;
31205 int i;
31206
31207 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31208 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31209 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31210 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31211
31212 vports = lpfc_create_vport_work_array(phba);
31213 if (vports != NULL)
31214 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31215 }
31216 }
31217 lpfc_destroy_vport_work_array(phba, vports);
31218 - atomic_set(&phba->num_rsrc_err, 0);
31219 - atomic_set(&phba->num_cmd_success, 0);
31220 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31221 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31222 }
31223
31224 /**
31225 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31226 }
31227 }
31228 lpfc_destroy_vport_work_array(phba, vports);
31229 - atomic_set(&phba->num_rsrc_err, 0);
31230 - atomic_set(&phba->num_cmd_success, 0);
31231 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31232 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31233 }
31234
31235 /**
31236 diff -urNp linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c
31237 --- linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31238 +++ linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31239 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31240 int rval;
31241 int i;
31242
31243 + pax_track_stack();
31244 +
31245 // Allocate memory for the base list of scb for management module.
31246 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31247
31248 diff -urNp linux-3.0.3/drivers/scsi/osd/osd_initiator.c linux-3.0.3/drivers/scsi/osd/osd_initiator.c
31249 --- linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31250 +++ linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31251 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31252 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31253 int ret;
31254
31255 + pax_track_stack();
31256 +
31257 or = osd_start_request(od, GFP_KERNEL);
31258 if (!or)
31259 return -ENOMEM;
31260 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.c linux-3.0.3/drivers/scsi/pmcraid.c
31261 --- linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31262 +++ linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31263 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31264 res->scsi_dev = scsi_dev;
31265 scsi_dev->hostdata = res;
31266 res->change_detected = 0;
31267 - atomic_set(&res->read_failures, 0);
31268 - atomic_set(&res->write_failures, 0);
31269 + atomic_set_unchecked(&res->read_failures, 0);
31270 + atomic_set_unchecked(&res->write_failures, 0);
31271 rc = 0;
31272 }
31273 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31274 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31275
31276 /* If this was a SCSI read/write command keep count of errors */
31277 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31278 - atomic_inc(&res->read_failures);
31279 + atomic_inc_unchecked(&res->read_failures);
31280 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31281 - atomic_inc(&res->write_failures);
31282 + atomic_inc_unchecked(&res->write_failures);
31283
31284 if (!RES_IS_GSCSI(res->cfg_entry) &&
31285 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31286 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31287 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31288 * hrrq_id assigned here in queuecommand
31289 */
31290 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31291 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31292 pinstance->num_hrrq;
31293 cmd->cmd_done = pmcraid_io_done;
31294
31295 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31296 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31297 * hrrq_id assigned here in queuecommand
31298 */
31299 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31300 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31301 pinstance->num_hrrq;
31302
31303 if (request_size) {
31304 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31305
31306 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31307 /* add resources only after host is added into system */
31308 - if (!atomic_read(&pinstance->expose_resources))
31309 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31310 return;
31311
31312 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31313 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31314 init_waitqueue_head(&pinstance->reset_wait_q);
31315
31316 atomic_set(&pinstance->outstanding_cmds, 0);
31317 - atomic_set(&pinstance->last_message_id, 0);
31318 - atomic_set(&pinstance->expose_resources, 0);
31319 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31320 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31321
31322 INIT_LIST_HEAD(&pinstance->free_res_q);
31323 INIT_LIST_HEAD(&pinstance->used_res_q);
31324 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31325 /* Schedule worker thread to handle CCN and take care of adding and
31326 * removing devices to OS
31327 */
31328 - atomic_set(&pinstance->expose_resources, 1);
31329 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31330 schedule_work(&pinstance->worker_q);
31331 return rc;
31332
31333 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.h linux-3.0.3/drivers/scsi/pmcraid.h
31334 --- linux-3.0.3/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31335 +++ linux-3.0.3/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31336 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31337 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31338
31339 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31340 - atomic_t last_message_id;
31341 + atomic_unchecked_t last_message_id;
31342
31343 /* configuration table */
31344 struct pmcraid_config_table *cfg_table;
31345 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31346 atomic_t outstanding_cmds;
31347
31348 /* should add/delete resources to mid-layer now ?*/
31349 - atomic_t expose_resources;
31350 + atomic_unchecked_t expose_resources;
31351
31352
31353
31354 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31355 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31356 };
31357 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31358 - atomic_t read_failures; /* count of failed READ commands */
31359 - atomic_t write_failures; /* count of failed WRITE commands */
31360 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31361 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31362
31363 /* To indicate add/delete/modify during CCN */
31364 u8 change_detected;
31365 diff -urNp linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h
31366 --- linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31367 +++ linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31368 @@ -2244,7 +2244,7 @@ struct isp_operations {
31369 int (*get_flash_version) (struct scsi_qla_host *, void *);
31370 int (*start_scsi) (srb_t *);
31371 int (*abort_isp) (struct scsi_qla_host *);
31372 -};
31373 +} __no_const;
31374
31375 /* MSI-X Support *************************************************************/
31376
31377 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h
31378 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31379 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31380 @@ -256,7 +256,7 @@ struct ddb_entry {
31381 atomic_t retry_relogin_timer; /* Min Time between relogins
31382 * (4000 only) */
31383 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31384 - atomic_t relogin_retry_count; /* Num of times relogin has been
31385 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31386 * retried */
31387
31388 uint16_t port;
31389 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c
31390 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31391 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31392 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31393 ddb_entry->fw_ddb_index = fw_ddb_index;
31394 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31395 atomic_set(&ddb_entry->relogin_timer, 0);
31396 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31397 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31398 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31399 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31400 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31401 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31402 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31403 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31404 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31405 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31406 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31407 atomic_set(&ddb_entry->relogin_timer, 0);
31408 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31409 iscsi_unblock_session(ddb_entry->sess);
31410 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c
31411 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31412 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31413 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31414 ddb_entry->fw_ddb_device_state ==
31415 DDB_DS_SESSION_FAILED) {
31416 /* Reset retry relogin timer */
31417 - atomic_inc(&ddb_entry->relogin_retry_count);
31418 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31419 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31420 " timed out-retrying"
31421 " relogin (%d)\n",
31422 ha->host_no,
31423 ddb_entry->fw_ddb_index,
31424 - atomic_read(&ddb_entry->
31425 + atomic_read_unchecked(&ddb_entry->
31426 relogin_retry_count))
31427 );
31428 start_dpc++;
31429 diff -urNp linux-3.0.3/drivers/scsi/scsi.c linux-3.0.3/drivers/scsi/scsi.c
31430 --- linux-3.0.3/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31431 +++ linux-3.0.3/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31432 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31433 unsigned long timeout;
31434 int rtn = 0;
31435
31436 - atomic_inc(&cmd->device->iorequest_cnt);
31437 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31438
31439 /* check if the device is still usable */
31440 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31441 diff -urNp linux-3.0.3/drivers/scsi/scsi_debug.c linux-3.0.3/drivers/scsi/scsi_debug.c
31442 --- linux-3.0.3/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31443 +++ linux-3.0.3/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31444 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31445 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31446 unsigned char *cmd = (unsigned char *)scp->cmnd;
31447
31448 + pax_track_stack();
31449 +
31450 if ((errsts = check_readiness(scp, 1, devip)))
31451 return errsts;
31452 memset(arr, 0, sizeof(arr));
31453 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31454 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31455 unsigned char *cmd = (unsigned char *)scp->cmnd;
31456
31457 + pax_track_stack();
31458 +
31459 if ((errsts = check_readiness(scp, 1, devip)))
31460 return errsts;
31461 memset(arr, 0, sizeof(arr));
31462 diff -urNp linux-3.0.3/drivers/scsi/scsi_lib.c linux-3.0.3/drivers/scsi/scsi_lib.c
31463 --- linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31464 +++ linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31465 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31466 shost = sdev->host;
31467 scsi_init_cmd_errh(cmd);
31468 cmd->result = DID_NO_CONNECT << 16;
31469 - atomic_inc(&cmd->device->iorequest_cnt);
31470 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31471
31472 /*
31473 * SCSI request completion path will do scsi_device_unbusy(),
31474 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31475
31476 INIT_LIST_HEAD(&cmd->eh_entry);
31477
31478 - atomic_inc(&cmd->device->iodone_cnt);
31479 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31480 if (cmd->result)
31481 - atomic_inc(&cmd->device->ioerr_cnt);
31482 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31483
31484 disposition = scsi_decide_disposition(cmd);
31485 if (disposition != SUCCESS &&
31486 diff -urNp linux-3.0.3/drivers/scsi/scsi_sysfs.c linux-3.0.3/drivers/scsi/scsi_sysfs.c
31487 --- linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31488 +++ linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31489 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31490 char *buf) \
31491 { \
31492 struct scsi_device *sdev = to_scsi_device(dev); \
31493 - unsigned long long count = atomic_read(&sdev->field); \
31494 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31495 return snprintf(buf, 20, "0x%llx\n", count); \
31496 } \
31497 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31498 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_fc.c linux-3.0.3/drivers/scsi/scsi_transport_fc.c
31499 --- linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31500 +++ linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31501 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31502 * Netlink Infrastructure
31503 */
31504
31505 -static atomic_t fc_event_seq;
31506 +static atomic_unchecked_t fc_event_seq;
31507
31508 /**
31509 * fc_get_event_number - Obtain the next sequential FC event number
31510 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31511 u32
31512 fc_get_event_number(void)
31513 {
31514 - return atomic_add_return(1, &fc_event_seq);
31515 + return atomic_add_return_unchecked(1, &fc_event_seq);
31516 }
31517 EXPORT_SYMBOL(fc_get_event_number);
31518
31519 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31520 {
31521 int error;
31522
31523 - atomic_set(&fc_event_seq, 0);
31524 + atomic_set_unchecked(&fc_event_seq, 0);
31525
31526 error = transport_class_register(&fc_host_class);
31527 if (error)
31528 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31529 char *cp;
31530
31531 *val = simple_strtoul(buf, &cp, 0);
31532 - if ((*cp && (*cp != '\n')) || (*val < 0))
31533 + if (*cp && (*cp != '\n'))
31534 return -EINVAL;
31535 /*
31536 * Check for overflow; dev_loss_tmo is u32
31537 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c
31538 --- linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31539 +++ linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31540 @@ -83,7 +83,7 @@ struct iscsi_internal {
31541 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31542 };
31543
31544 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31545 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31546 static struct workqueue_struct *iscsi_eh_timer_workq;
31547
31548 /*
31549 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31550 int err;
31551
31552 ihost = shost->shost_data;
31553 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31554 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31555
31556 if (id == ISCSI_MAX_TARGET) {
31557 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31558 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31559 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31560 ISCSI_TRANSPORT_VERSION);
31561
31562 - atomic_set(&iscsi_session_nr, 0);
31563 + atomic_set_unchecked(&iscsi_session_nr, 0);
31564
31565 err = class_register(&iscsi_transport_class);
31566 if (err)
31567 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_srp.c linux-3.0.3/drivers/scsi/scsi_transport_srp.c
31568 --- linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31569 +++ linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31570 @@ -33,7 +33,7 @@
31571 #include "scsi_transport_srp_internal.h"
31572
31573 struct srp_host_attrs {
31574 - atomic_t next_port_id;
31575 + atomic_unchecked_t next_port_id;
31576 };
31577 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31578
31579 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31580 struct Scsi_Host *shost = dev_to_shost(dev);
31581 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31582
31583 - atomic_set(&srp_host->next_port_id, 0);
31584 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31585 return 0;
31586 }
31587
31588 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31589 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31590 rport->roles = ids->roles;
31591
31592 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31593 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31594 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31595
31596 transport_setup_device(&rport->dev);
31597 diff -urNp linux-3.0.3/drivers/scsi/sg.c linux-3.0.3/drivers/scsi/sg.c
31598 --- linux-3.0.3/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31599 +++ linux-3.0.3/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31600 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31601 const struct file_operations * fops;
31602 };
31603
31604 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31605 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31606 {"allow_dio", &adio_fops},
31607 {"debug", &debug_fops},
31608 {"def_reserved_size", &dressz_fops},
31609 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31610 {
31611 int k, mask;
31612 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31613 - struct sg_proc_leaf * leaf;
31614 + const struct sg_proc_leaf * leaf;
31615
31616 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31617 if (!sg_proc_sgp)
31618 diff -urNp linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c
31619 --- linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31620 +++ linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31621 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31622 int do_iounmap = 0;
31623 int do_disable_device = 1;
31624
31625 + pax_track_stack();
31626 +
31627 memset(&sym_dev, 0, sizeof(sym_dev));
31628 memset(&nvram, 0, sizeof(nvram));
31629 sym_dev.pdev = pdev;
31630 diff -urNp linux-3.0.3/drivers/scsi/vmw_pvscsi.c linux-3.0.3/drivers/scsi/vmw_pvscsi.c
31631 --- linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31632 +++ linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31633 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31634 dma_addr_t base;
31635 unsigned i;
31636
31637 + pax_track_stack();
31638 +
31639 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31640 cmd.reqRingNumPages = adapter->req_pages;
31641 cmd.cmpRingNumPages = adapter->cmp_pages;
31642 diff -urNp linux-3.0.3/drivers/spi/spi.c linux-3.0.3/drivers/spi/spi.c
31643 --- linux-3.0.3/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31644 +++ linux-3.0.3/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31645 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31646 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31647
31648 /* portable code must never pass more than 32 bytes */
31649 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31650 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31651
31652 static u8 *buf;
31653
31654 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31655 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31656 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31657 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31658 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31659
31660
31661 -static struct net_device_ops ar6000_netdev_ops = {
31662 +static net_device_ops_no_const ar6000_netdev_ops = {
31663 .ndo_init = NULL,
31664 .ndo_open = ar6000_open,
31665 .ndo_stop = ar6000_close,
31666 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31667 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31668 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31669 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31670 typedef struct ar6k_pal_config_s
31671 {
31672 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31673 -}ar6k_pal_config_t;
31674 +} __no_const ar6k_pal_config_t;
31675
31676 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31677 #endif /* _AR6K_PAL_H_ */
31678 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31679 --- linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31680 +++ linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31681 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31682 free_netdev(ifp->net);
31683 }
31684 /* Allocate etherdev, including space for private structure */
31685 - ifp->net = alloc_etherdev(sizeof(dhd));
31686 + ifp->net = alloc_etherdev(sizeof(*dhd));
31687 if (!ifp->net) {
31688 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31689 ret = -ENOMEM;
31690 }
31691 if (ret == 0) {
31692 strcpy(ifp->net->name, ifp->name);
31693 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31694 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31695 err = dhd_net_attach(&dhd->pub, ifp->idx);
31696 if (err != 0) {
31697 DHD_ERROR(("%s: dhd_net_attach failed, "
31698 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31699 strcpy(nv_path, nvram_path);
31700
31701 /* Allocate etherdev, including space for private structure */
31702 - net = alloc_etherdev(sizeof(dhd));
31703 + net = alloc_etherdev(sizeof(*dhd));
31704 if (!net) {
31705 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31706 goto fail;
31707 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31708 /*
31709 * Save the dhd_info into the priv
31710 */
31711 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31712 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31713
31714 /* Set network interface name if it was provided as module parameter */
31715 if (iface_name[0]) {
31716 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31717 /*
31718 * Save the dhd_info into the priv
31719 */
31720 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31721 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31722
31723 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31724 g_bus = bus;
31725 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31726 --- linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31727 +++ linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31728 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31729 initfn_t carrsuppr;
31730 rxsigpwrfn_t rxsigpwr;
31731 detachfn_t detach;
31732 -};
31733 +} __no_const;
31734 typedef struct phy_func_ptr phy_func_ptr_t;
31735
31736 struct phy_info {
31737 diff -urNp linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h
31738 --- linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31739 +++ linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31740 @@ -185,7 +185,7 @@ typedef struct {
31741 u16 func, uint bustype, void *regsva, void *param);
31742 /* detach from device */
31743 void (*detach) (void *ch);
31744 -} bcmsdh_driver_t;
31745 +} __no_const bcmsdh_driver_t;
31746
31747 /* platform specific/high level functions */
31748 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31749 diff -urNp linux-3.0.3/drivers/staging/et131x/et1310_tx.c linux-3.0.3/drivers/staging/et131x/et1310_tx.c
31750 --- linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31751 +++ linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31752 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31753 struct net_device_stats *stats = &etdev->net_stats;
31754
31755 if (tcb->flags & fMP_DEST_BROAD)
31756 - atomic_inc(&etdev->Stats.brdcstxmt);
31757 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31758 else if (tcb->flags & fMP_DEST_MULTI)
31759 - atomic_inc(&etdev->Stats.multixmt);
31760 + atomic_inc_unchecked(&etdev->Stats.multixmt);
31761 else
31762 - atomic_inc(&etdev->Stats.unixmt);
31763 + atomic_inc_unchecked(&etdev->Stats.unixmt);
31764
31765 if (tcb->skb) {
31766 stats->tx_bytes += tcb->skb->len;
31767 diff -urNp linux-3.0.3/drivers/staging/et131x/et131x_adapter.h linux-3.0.3/drivers/staging/et131x/et131x_adapter.h
31768 --- linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31769 +++ linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31770 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31771 * operations
31772 */
31773 u32 unircv; /* # multicast packets received */
31774 - atomic_t unixmt; /* # multicast packets for Tx */
31775 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31776 u32 multircv; /* # multicast packets received */
31777 - atomic_t multixmt; /* # multicast packets for Tx */
31778 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31779 u32 brdcstrcv; /* # broadcast packets received */
31780 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
31781 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31782 u32 norcvbuf; /* # Rx packets discarded */
31783 u32 noxmtbuf; /* # Tx packets discarded */
31784
31785 diff -urNp linux-3.0.3/drivers/staging/hv/channel.c linux-3.0.3/drivers/staging/hv/channel.c
31786 --- linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31787 +++ linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31788 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31789 int ret = 0;
31790 int t;
31791
31792 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31793 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31794 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31795 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31796
31797 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31798 if (ret)
31799 diff -urNp linux-3.0.3/drivers/staging/hv/hv.c linux-3.0.3/drivers/staging/hv/hv.c
31800 --- linux-3.0.3/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31801 +++ linux-3.0.3/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
31802 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
31803 u64 output_address = (output) ? virt_to_phys(output) : 0;
31804 u32 output_address_hi = output_address >> 32;
31805 u32 output_address_lo = output_address & 0xFFFFFFFF;
31806 - volatile void *hypercall_page = hv_context.hypercall_page;
31807 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31808
31809 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31810 "=a"(hv_status_lo) : "d" (control_hi),
31811 diff -urNp linux-3.0.3/drivers/staging/hv/hv_mouse.c linux-3.0.3/drivers/staging/hv/hv_mouse.c
31812 --- linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
31813 +++ linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
31814 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
31815 if (hid_dev) {
31816 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
31817
31818 - hid_dev->ll_driver->open = mousevsc_hid_open;
31819 - hid_dev->ll_driver->close = mousevsc_hid_close;
31820 + pax_open_kernel();
31821 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
31822 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
31823 + pax_close_kernel();
31824
31825 hid_dev->bus = BUS_VIRTUAL;
31826 hid_dev->vendor = input_device_ctx->device_info.vendor;
31827 diff -urNp linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h
31828 --- linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
31829 +++ linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
31830 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
31831 struct vmbus_connection {
31832 enum vmbus_connect_state conn_state;
31833
31834 - atomic_t next_gpadl_handle;
31835 + atomic_unchecked_t next_gpadl_handle;
31836
31837 /*
31838 * Represents channel interrupts. Each bit position represents a
31839 diff -urNp linux-3.0.3/drivers/staging/hv/rndis_filter.c linux-3.0.3/drivers/staging/hv/rndis_filter.c
31840 --- linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
31841 +++ linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
31842 @@ -43,7 +43,7 @@ struct rndis_device {
31843
31844 enum rndis_device_state state;
31845 u32 link_stat;
31846 - atomic_t new_req_id;
31847 + atomic_unchecked_t new_req_id;
31848
31849 spinlock_t request_lock;
31850 struct list_head req_list;
31851 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
31852 * template
31853 */
31854 set = &rndis_msg->msg.set_req;
31855 - set->req_id = atomic_inc_return(&dev->new_req_id);
31856 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
31857
31858 /* Add to the request list */
31859 spin_lock_irqsave(&dev->request_lock, flags);
31860 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
31861
31862 /* Setup the rndis set */
31863 halt = &request->request_msg.msg.halt_req;
31864 - halt->req_id = atomic_inc_return(&dev->new_req_id);
31865 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
31866
31867 /* Ignore return since this msg is optional. */
31868 rndis_filter_send_request(dev, request);
31869 diff -urNp linux-3.0.3/drivers/staging/hv/vmbus_drv.c linux-3.0.3/drivers/staging/hv/vmbus_drv.c
31870 --- linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
31871 +++ linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
31872 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
31873 {
31874 int ret = 0;
31875
31876 - static atomic_t device_num = ATOMIC_INIT(0);
31877 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31878
31879 /* Set the device name. Otherwise, device_register() will fail. */
31880 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31881 - atomic_inc_return(&device_num));
31882 + atomic_inc_return_unchecked(&device_num));
31883
31884 /* The new device belongs to this bus */
31885 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
31886 diff -urNp linux-3.0.3/drivers/staging/iio/ring_generic.h linux-3.0.3/drivers/staging/iio/ring_generic.h
31887 --- linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
31888 +++ linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
31889 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
31890
31891 int (*is_enabled)(struct iio_ring_buffer *ring);
31892 int (*enable)(struct iio_ring_buffer *ring);
31893 -};
31894 +} __no_const;
31895
31896 struct iio_ring_setup_ops {
31897 int (*preenable)(struct iio_dev *);
31898 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet.c linux-3.0.3/drivers/staging/octeon/ethernet.c
31899 --- linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
31900 +++ linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
31901 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
31902 * since the RX tasklet also increments it.
31903 */
31904 #ifdef CONFIG_64BIT
31905 - atomic64_add(rx_status.dropped_packets,
31906 - (atomic64_t *)&priv->stats.rx_dropped);
31907 + atomic64_add_unchecked(rx_status.dropped_packets,
31908 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
31909 #else
31910 - atomic_add(rx_status.dropped_packets,
31911 - (atomic_t *)&priv->stats.rx_dropped);
31912 + atomic_add_unchecked(rx_status.dropped_packets,
31913 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
31914 #endif
31915 }
31916
31917 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet-rx.c linux-3.0.3/drivers/staging/octeon/ethernet-rx.c
31918 --- linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
31919 +++ linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
31920 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
31921 /* Increment RX stats for virtual ports */
31922 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
31923 #ifdef CONFIG_64BIT
31924 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
31925 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
31926 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
31927 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
31928 #else
31929 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
31930 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
31931 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
31932 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
31933 #endif
31934 }
31935 netif_receive_skb(skb);
31936 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
31937 dev->name);
31938 */
31939 #ifdef CONFIG_64BIT
31940 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
31941 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
31942 #else
31943 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
31944 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
31945 #endif
31946 dev_kfree_skb_irq(skb);
31947 }
31948 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/inode.c linux-3.0.3/drivers/staging/pohmelfs/inode.c
31949 --- linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
31950 +++ linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
31951 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
31952 mutex_init(&psb->mcache_lock);
31953 psb->mcache_root = RB_ROOT;
31954 psb->mcache_timeout = msecs_to_jiffies(5000);
31955 - atomic_long_set(&psb->mcache_gen, 0);
31956 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
31957
31958 psb->trans_max_pages = 100;
31959
31960 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
31961 INIT_LIST_HEAD(&psb->crypto_ready_list);
31962 INIT_LIST_HEAD(&psb->crypto_active_list);
31963
31964 - atomic_set(&psb->trans_gen, 1);
31965 + atomic_set_unchecked(&psb->trans_gen, 1);
31966 atomic_long_set(&psb->total_inodes, 0);
31967
31968 mutex_init(&psb->state_lock);
31969 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/mcache.c linux-3.0.3/drivers/staging/pohmelfs/mcache.c
31970 --- linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
31971 +++ linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
31972 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
31973 m->data = data;
31974 m->start = start;
31975 m->size = size;
31976 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
31977 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
31978
31979 mutex_lock(&psb->mcache_lock);
31980 err = pohmelfs_mcache_insert(psb, m);
31981 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/netfs.h linux-3.0.3/drivers/staging/pohmelfs/netfs.h
31982 --- linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
31983 +++ linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
31984 @@ -571,14 +571,14 @@ struct pohmelfs_config;
31985 struct pohmelfs_sb {
31986 struct rb_root mcache_root;
31987 struct mutex mcache_lock;
31988 - atomic_long_t mcache_gen;
31989 + atomic_long_unchecked_t mcache_gen;
31990 unsigned long mcache_timeout;
31991
31992 unsigned int idx;
31993
31994 unsigned int trans_retries;
31995
31996 - atomic_t trans_gen;
31997 + atomic_unchecked_t trans_gen;
31998
31999 unsigned int crypto_attached_size;
32000 unsigned int crypto_align_size;
32001 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/trans.c linux-3.0.3/drivers/staging/pohmelfs/trans.c
32002 --- linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32003 +++ linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32004 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32005 int err;
32006 struct netfs_cmd *cmd = t->iovec.iov_base;
32007
32008 - t->gen = atomic_inc_return(&psb->trans_gen);
32009 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32010
32011 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32012 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32013 diff -urNp linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h
32014 --- linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32015 +++ linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32016 @@ -83,7 +83,7 @@ struct _io_ops {
32017 u8 *pmem);
32018 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32019 u8 *pmem);
32020 -};
32021 +} __no_const;
32022
32023 struct io_req {
32024 struct list_head list;
32025 diff -urNp linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c
32026 --- linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32027 +++ linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-08-23 21:48:14.000000000 -0400
32028 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32029 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32030
32031 if (rlen)
32032 - if (copy_to_user(data, &resp, rlen))
32033 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32034 return -EFAULT;
32035
32036 return 0;
32037 diff -urNp linux-3.0.3/drivers/staging/tty/stallion.c linux-3.0.3/drivers/staging/tty/stallion.c
32038 --- linux-3.0.3/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32039 +++ linux-3.0.3/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32040 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32041 struct stlport stl_dummyport;
32042 struct stlport *portp;
32043
32044 + pax_track_stack();
32045 +
32046 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32047 return -EFAULT;
32048 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32049 diff -urNp linux-3.0.3/drivers/staging/usbip/usbip_common.h linux-3.0.3/drivers/staging/usbip/usbip_common.h
32050 --- linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32051 +++ linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32052 @@ -315,7 +315,7 @@ struct usbip_device {
32053 void (*shutdown)(struct usbip_device *);
32054 void (*reset)(struct usbip_device *);
32055 void (*unusable)(struct usbip_device *);
32056 - } eh_ops;
32057 + } __no_const eh_ops;
32058 };
32059
32060 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32061 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci.h linux-3.0.3/drivers/staging/usbip/vhci.h
32062 --- linux-3.0.3/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32063 +++ linux-3.0.3/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32064 @@ -94,7 +94,7 @@ struct vhci_hcd {
32065 unsigned resuming:1;
32066 unsigned long re_timeout;
32067
32068 - atomic_t seqnum;
32069 + atomic_unchecked_t seqnum;
32070
32071 /*
32072 * NOTE:
32073 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_hcd.c linux-3.0.3/drivers/staging/usbip/vhci_hcd.c
32074 --- linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32075 +++ linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32076 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32077 return;
32078 }
32079
32080 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32081 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32082 if (priv->seqnum == 0xffff)
32083 dev_info(&urb->dev->dev, "seqnum max\n");
32084
32085 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32086 return -ENOMEM;
32087 }
32088
32089 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32090 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32091 if (unlink->seqnum == 0xffff)
32092 pr_info("seqnum max\n");
32093
32094 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32095 vdev->rhport = rhport;
32096 }
32097
32098 - atomic_set(&vhci->seqnum, 0);
32099 + atomic_set_unchecked(&vhci->seqnum, 0);
32100 spin_lock_init(&vhci->lock);
32101
32102 hcd->power_budget = 0; /* no limit */
32103 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_rx.c linux-3.0.3/drivers/staging/usbip/vhci_rx.c
32104 --- linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32105 +++ linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32106 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32107 if (!urb) {
32108 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32109 pr_info("max seqnum %d\n",
32110 - atomic_read(&the_controller->seqnum));
32111 + atomic_read_unchecked(&the_controller->seqnum));
32112 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32113 return;
32114 }
32115 diff -urNp linux-3.0.3/drivers/staging/vt6655/hostap.c linux-3.0.3/drivers/staging/vt6655/hostap.c
32116 --- linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32117 +++ linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32118 @@ -79,14 +79,13 @@ static int msglevel
32119 *
32120 */
32121
32122 +static net_device_ops_no_const apdev_netdev_ops;
32123 +
32124 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32125 {
32126 PSDevice apdev_priv;
32127 struct net_device *dev = pDevice->dev;
32128 int ret;
32129 - const struct net_device_ops apdev_netdev_ops = {
32130 - .ndo_start_xmit = pDevice->tx_80211,
32131 - };
32132
32133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32134
32135 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32136 *apdev_priv = *pDevice;
32137 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32138
32139 + /* only half broken now */
32140 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32141 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32142
32143 pDevice->apdev->type = ARPHRD_IEEE80211;
32144 diff -urNp linux-3.0.3/drivers/staging/vt6656/hostap.c linux-3.0.3/drivers/staging/vt6656/hostap.c
32145 --- linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32146 +++ linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32147 @@ -80,14 +80,13 @@ static int msglevel
32148 *
32149 */
32150
32151 +static net_device_ops_no_const apdev_netdev_ops;
32152 +
32153 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32154 {
32155 PSDevice apdev_priv;
32156 struct net_device *dev = pDevice->dev;
32157 int ret;
32158 - const struct net_device_ops apdev_netdev_ops = {
32159 - .ndo_start_xmit = pDevice->tx_80211,
32160 - };
32161
32162 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32163
32164 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32165 *apdev_priv = *pDevice;
32166 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32167
32168 + /* only half broken now */
32169 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32170 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32171
32172 pDevice->apdev->type = ARPHRD_IEEE80211;
32173 diff -urNp linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c
32174 --- linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32175 +++ linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32176 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32177
32178 struct usbctlx_completor {
32179 int (*complete) (struct usbctlx_completor *);
32180 -};
32181 +} __no_const;
32182
32183 static int
32184 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32185 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.c linux-3.0.3/drivers/staging/zcache/tmem.c
32186 --- linux-3.0.3/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32187 +++ linux-3.0.3/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32188 @@ -39,7 +39,7 @@
32189 * A tmem host implementation must use this function to register callbacks
32190 * for memory allocation.
32191 */
32192 -static struct tmem_hostops tmem_hostops;
32193 +static tmem_hostops_no_const tmem_hostops;
32194
32195 static void tmem_objnode_tree_init(void);
32196
32197 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32198 * A tmem host implementation must use this function to register
32199 * callbacks for a page-accessible memory (PAM) implementation
32200 */
32201 -static struct tmem_pamops tmem_pamops;
32202 +static tmem_pamops_no_const tmem_pamops;
32203
32204 void tmem_register_pamops(struct tmem_pamops *m)
32205 {
32206 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.h linux-3.0.3/drivers/staging/zcache/tmem.h
32207 --- linux-3.0.3/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32208 +++ linux-3.0.3/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32209 @@ -171,6 +171,7 @@ struct tmem_pamops {
32210 int (*get_data)(struct page *, void *, struct tmem_pool *);
32211 void (*free)(void *, struct tmem_pool *);
32212 };
32213 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32214 extern void tmem_register_pamops(struct tmem_pamops *m);
32215
32216 /* memory allocation methods provided by the host implementation */
32217 @@ -180,6 +181,7 @@ struct tmem_hostops {
32218 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32219 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32220 };
32221 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32222 extern void tmem_register_hostops(struct tmem_hostops *m);
32223
32224 /* core tmem accessor functions */
32225 diff -urNp linux-3.0.3/drivers/target/target_core_alua.c linux-3.0.3/drivers/target/target_core_alua.c
32226 --- linux-3.0.3/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32227 +++ linux-3.0.3/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32228 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32229 char path[ALUA_METADATA_PATH_LEN];
32230 int len;
32231
32232 + pax_track_stack();
32233 +
32234 memset(path, 0, ALUA_METADATA_PATH_LEN);
32235
32236 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32237 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32238 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32239 int len;
32240
32241 + pax_track_stack();
32242 +
32243 memset(path, 0, ALUA_METADATA_PATH_LEN);
32244 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32245
32246 diff -urNp linux-3.0.3/drivers/target/target_core_cdb.c linux-3.0.3/drivers/target/target_core_cdb.c
32247 --- linux-3.0.3/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32248 +++ linux-3.0.3/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32249 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32250 int length = 0;
32251 unsigned char buf[SE_MODE_PAGE_BUF];
32252
32253 + pax_track_stack();
32254 +
32255 memset(buf, 0, SE_MODE_PAGE_BUF);
32256
32257 switch (cdb[2] & 0x3f) {
32258 diff -urNp linux-3.0.3/drivers/target/target_core_configfs.c linux-3.0.3/drivers/target/target_core_configfs.c
32259 --- linux-3.0.3/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32260 +++ linux-3.0.3/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32261 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32262 ssize_t len = 0;
32263 int reg_count = 0, prf_isid;
32264
32265 + pax_track_stack();
32266 +
32267 if (!(su_dev->se_dev_ptr))
32268 return -ENODEV;
32269
32270 diff -urNp linux-3.0.3/drivers/target/target_core_pr.c linux-3.0.3/drivers/target/target_core_pr.c
32271 --- linux-3.0.3/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32272 +++ linux-3.0.3/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32273 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32274 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32275 u16 tpgt;
32276
32277 + pax_track_stack();
32278 +
32279 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32280 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32281 /*
32282 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32283 ssize_t len = 0;
32284 int reg_count = 0;
32285
32286 + pax_track_stack();
32287 +
32288 memset(buf, 0, pr_aptpl_buf_len);
32289 /*
32290 * Called to clear metadata once APTPL has been deactivated.
32291 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32292 char path[512];
32293 int ret;
32294
32295 + pax_track_stack();
32296 +
32297 memset(iov, 0, sizeof(struct iovec));
32298 memset(path, 0, 512);
32299
32300 diff -urNp linux-3.0.3/drivers/target/target_core_tmr.c linux-3.0.3/drivers/target/target_core_tmr.c
32301 --- linux-3.0.3/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32302 +++ linux-3.0.3/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32303 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32304 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32305 T_TASK(cmd)->t_task_cdbs,
32306 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32307 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32308 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32309 atomic_read(&T_TASK(cmd)->t_transport_active),
32310 atomic_read(&T_TASK(cmd)->t_transport_stop),
32311 atomic_read(&T_TASK(cmd)->t_transport_sent));
32312 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32313 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32314 " task: %p, t_fe_count: %d dev: %p\n", task,
32315 fe_count, dev);
32316 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32317 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32318 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32319 flags);
32320 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32321 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32322 }
32323 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32324 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32325 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32326 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32327 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32328 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32329
32330 diff -urNp linux-3.0.3/drivers/target/target_core_transport.c linux-3.0.3/drivers/target/target_core_transport.c
32331 --- linux-3.0.3/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32332 +++ linux-3.0.3/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32333 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32334
32335 dev->queue_depth = dev_limits->queue_depth;
32336 atomic_set(&dev->depth_left, dev->queue_depth);
32337 - atomic_set(&dev->dev_ordered_id, 0);
32338 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32339
32340 se_dev_set_default_attribs(dev, dev_limits);
32341
32342 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32343 * Used to determine when ORDERED commands should go from
32344 * Dormant to Active status.
32345 */
32346 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32347 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32348 smp_mb__after_atomic_inc();
32349 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32350 cmd->se_ordered_id, cmd->sam_task_attr,
32351 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32352 " t_transport_active: %d t_transport_stop: %d"
32353 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32354 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32355 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32356 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32357 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32358 atomic_read(&T_TASK(cmd)->t_transport_active),
32359 atomic_read(&T_TASK(cmd)->t_transport_stop),
32360 @@ -2673,9 +2673,9 @@ check_depth:
32361 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32362 atomic_set(&task->task_active, 1);
32363 atomic_set(&task->task_sent, 1);
32364 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32365 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32366
32367 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32368 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32369 T_TASK(cmd)->t_task_cdbs)
32370 atomic_set(&cmd->transport_sent, 1);
32371
32372 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32373 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32374 }
32375 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32376 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32377 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32378 goto remove;
32379
32380 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32381 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32382 {
32383 int ret = 0;
32384
32385 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32386 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32387 if (!(send_status) ||
32388 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32389 return 1;
32390 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32391 */
32392 if (cmd->data_direction == DMA_TO_DEVICE) {
32393 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32394 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32395 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32396 smp_mb__after_atomic_inc();
32397 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32398 transport_new_cmd_failure(cmd);
32399 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32400 CMD_TFO(cmd)->get_task_tag(cmd),
32401 T_TASK(cmd)->t_task_cdbs,
32402 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32403 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32404 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32405 atomic_read(&T_TASK(cmd)->t_transport_active),
32406 atomic_read(&T_TASK(cmd)->t_transport_stop),
32407 atomic_read(&T_TASK(cmd)->t_transport_sent));
32408 diff -urNp linux-3.0.3/drivers/telephony/ixj.c linux-3.0.3/drivers/telephony/ixj.c
32409 --- linux-3.0.3/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32410 +++ linux-3.0.3/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32411 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32412 bool mContinue;
32413 char *pIn, *pOut;
32414
32415 + pax_track_stack();
32416 +
32417 if (!SCI_Prepare(j))
32418 return 0;
32419
32420 diff -urNp linux-3.0.3/drivers/tty/hvc/hvcs.c linux-3.0.3/drivers/tty/hvc/hvcs.c
32421 --- linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32422 +++ linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32423 @@ -83,6 +83,7 @@
32424 #include <asm/hvcserver.h>
32425 #include <asm/uaccess.h>
32426 #include <asm/vio.h>
32427 +#include <asm/local.h>
32428
32429 /*
32430 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32431 @@ -270,7 +271,7 @@ struct hvcs_struct {
32432 unsigned int index;
32433
32434 struct tty_struct *tty;
32435 - int open_count;
32436 + local_t open_count;
32437
32438 /*
32439 * Used to tell the driver kernel_thread what operations need to take
32440 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32441
32442 spin_lock_irqsave(&hvcsd->lock, flags);
32443
32444 - if (hvcsd->open_count > 0) {
32445 + if (local_read(&hvcsd->open_count) > 0) {
32446 spin_unlock_irqrestore(&hvcsd->lock, flags);
32447 printk(KERN_INFO "HVCS: vterm state unchanged. "
32448 "The hvcs device node is still in use.\n");
32449 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32450 if ((retval = hvcs_partner_connect(hvcsd)))
32451 goto error_release;
32452
32453 - hvcsd->open_count = 1;
32454 + local_set(&hvcsd->open_count, 1);
32455 hvcsd->tty = tty;
32456 tty->driver_data = hvcsd;
32457
32458 @@ -1179,7 +1180,7 @@ fast_open:
32459
32460 spin_lock_irqsave(&hvcsd->lock, flags);
32461 kref_get(&hvcsd->kref);
32462 - hvcsd->open_count++;
32463 + local_inc(&hvcsd->open_count);
32464 hvcsd->todo_mask |= HVCS_SCHED_READ;
32465 spin_unlock_irqrestore(&hvcsd->lock, flags);
32466
32467 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32468 hvcsd = tty->driver_data;
32469
32470 spin_lock_irqsave(&hvcsd->lock, flags);
32471 - if (--hvcsd->open_count == 0) {
32472 + if (local_dec_and_test(&hvcsd->open_count)) {
32473
32474 vio_disable_interrupts(hvcsd->vdev);
32475
32476 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32477 free_irq(irq, hvcsd);
32478 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32479 return;
32480 - } else if (hvcsd->open_count < 0) {
32481 + } else if (local_read(&hvcsd->open_count) < 0) {
32482 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32483 " is missmanaged.\n",
32484 - hvcsd->vdev->unit_address, hvcsd->open_count);
32485 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32486 }
32487
32488 spin_unlock_irqrestore(&hvcsd->lock, flags);
32489 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32490
32491 spin_lock_irqsave(&hvcsd->lock, flags);
32492 /* Preserve this so that we know how many kref refs to put */
32493 - temp_open_count = hvcsd->open_count;
32494 + temp_open_count = local_read(&hvcsd->open_count);
32495
32496 /*
32497 * Don't kref put inside the spinlock because the destruction
32498 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32499 hvcsd->tty->driver_data = NULL;
32500 hvcsd->tty = NULL;
32501
32502 - hvcsd->open_count = 0;
32503 + local_set(&hvcsd->open_count, 0);
32504
32505 /* This will drop any buffered data on the floor which is OK in a hangup
32506 * scenario. */
32507 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32508 * the middle of a write operation? This is a crummy place to do this
32509 * but we want to keep it all in the spinlock.
32510 */
32511 - if (hvcsd->open_count <= 0) {
32512 + if (local_read(&hvcsd->open_count) <= 0) {
32513 spin_unlock_irqrestore(&hvcsd->lock, flags);
32514 return -ENODEV;
32515 }
32516 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32517 {
32518 struct hvcs_struct *hvcsd = tty->driver_data;
32519
32520 - if (!hvcsd || hvcsd->open_count <= 0)
32521 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32522 return 0;
32523
32524 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32525 diff -urNp linux-3.0.3/drivers/tty/ipwireless/tty.c linux-3.0.3/drivers/tty/ipwireless/tty.c
32526 --- linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32527 +++ linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32528 @@ -29,6 +29,7 @@
32529 #include <linux/tty_driver.h>
32530 #include <linux/tty_flip.h>
32531 #include <linux/uaccess.h>
32532 +#include <asm/local.h>
32533
32534 #include "tty.h"
32535 #include "network.h"
32536 @@ -51,7 +52,7 @@ struct ipw_tty {
32537 int tty_type;
32538 struct ipw_network *network;
32539 struct tty_struct *linux_tty;
32540 - int open_count;
32541 + local_t open_count;
32542 unsigned int control_lines;
32543 struct mutex ipw_tty_mutex;
32544 int tx_bytes_queued;
32545 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32546 mutex_unlock(&tty->ipw_tty_mutex);
32547 return -ENODEV;
32548 }
32549 - if (tty->open_count == 0)
32550 + if (local_read(&tty->open_count) == 0)
32551 tty->tx_bytes_queued = 0;
32552
32553 - tty->open_count++;
32554 + local_inc(&tty->open_count);
32555
32556 tty->linux_tty = linux_tty;
32557 linux_tty->driver_data = tty;
32558 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32559
32560 static void do_ipw_close(struct ipw_tty *tty)
32561 {
32562 - tty->open_count--;
32563 -
32564 - if (tty->open_count == 0) {
32565 + if (local_dec_return(&tty->open_count) == 0) {
32566 struct tty_struct *linux_tty = tty->linux_tty;
32567
32568 if (linux_tty != NULL) {
32569 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32570 return;
32571
32572 mutex_lock(&tty->ipw_tty_mutex);
32573 - if (tty->open_count == 0) {
32574 + if (local_read(&tty->open_count) == 0) {
32575 mutex_unlock(&tty->ipw_tty_mutex);
32576 return;
32577 }
32578 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32579 return;
32580 }
32581
32582 - if (!tty->open_count) {
32583 + if (!local_read(&tty->open_count)) {
32584 mutex_unlock(&tty->ipw_tty_mutex);
32585 return;
32586 }
32587 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32588 return -ENODEV;
32589
32590 mutex_lock(&tty->ipw_tty_mutex);
32591 - if (!tty->open_count) {
32592 + if (!local_read(&tty->open_count)) {
32593 mutex_unlock(&tty->ipw_tty_mutex);
32594 return -EINVAL;
32595 }
32596 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32597 if (!tty)
32598 return -ENODEV;
32599
32600 - if (!tty->open_count)
32601 + if (!local_read(&tty->open_count))
32602 return -EINVAL;
32603
32604 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32605 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32606 if (!tty)
32607 return 0;
32608
32609 - if (!tty->open_count)
32610 + if (!local_read(&tty->open_count))
32611 return 0;
32612
32613 return tty->tx_bytes_queued;
32614 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32615 if (!tty)
32616 return -ENODEV;
32617
32618 - if (!tty->open_count)
32619 + if (!local_read(&tty->open_count))
32620 return -EINVAL;
32621
32622 return get_control_lines(tty);
32623 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32624 if (!tty)
32625 return -ENODEV;
32626
32627 - if (!tty->open_count)
32628 + if (!local_read(&tty->open_count))
32629 return -EINVAL;
32630
32631 return set_control_lines(tty, set, clear);
32632 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32633 if (!tty)
32634 return -ENODEV;
32635
32636 - if (!tty->open_count)
32637 + if (!local_read(&tty->open_count))
32638 return -EINVAL;
32639
32640 /* FIXME: Exactly how is the tty object locked here .. */
32641 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32642 against a parallel ioctl etc */
32643 mutex_lock(&ttyj->ipw_tty_mutex);
32644 }
32645 - while (ttyj->open_count)
32646 + while (local_read(&ttyj->open_count))
32647 do_ipw_close(ttyj);
32648 ipwireless_disassociate_network_ttys(network,
32649 ttyj->channel_idx);
32650 diff -urNp linux-3.0.3/drivers/tty/n_gsm.c linux-3.0.3/drivers/tty/n_gsm.c
32651 --- linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32652 +++ linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32653 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32654 return NULL;
32655 spin_lock_init(&dlci->lock);
32656 dlci->fifo = &dlci->_fifo;
32657 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32658 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32659 kfree(dlci);
32660 return NULL;
32661 }
32662 diff -urNp linux-3.0.3/drivers/tty/n_tty.c linux-3.0.3/drivers/tty/n_tty.c
32663 --- linux-3.0.3/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32664 +++ linux-3.0.3/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32665 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32666 {
32667 *ops = tty_ldisc_N_TTY;
32668 ops->owner = NULL;
32669 - ops->refcount = ops->flags = 0;
32670 + atomic_set(&ops->refcount, 0);
32671 + ops->flags = 0;
32672 }
32673 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32674 diff -urNp linux-3.0.3/drivers/tty/pty.c linux-3.0.3/drivers/tty/pty.c
32675 --- linux-3.0.3/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32676 +++ linux-3.0.3/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32677 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32678 register_sysctl_table(pty_root_table);
32679
32680 /* Now create the /dev/ptmx special device */
32681 + pax_open_kernel();
32682 tty_default_fops(&ptmx_fops);
32683 - ptmx_fops.open = ptmx_open;
32684 + *(void **)&ptmx_fops.open = ptmx_open;
32685 + pax_close_kernel();
32686
32687 cdev_init(&ptmx_cdev, &ptmx_fops);
32688 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32689 diff -urNp linux-3.0.3/drivers/tty/rocket.c linux-3.0.3/drivers/tty/rocket.c
32690 --- linux-3.0.3/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32691 +++ linux-3.0.3/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32692 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32693 struct rocket_ports tmp;
32694 int board;
32695
32696 + pax_track_stack();
32697 +
32698 if (!retports)
32699 return -EFAULT;
32700 memset(&tmp, 0, sizeof (tmp));
32701 diff -urNp linux-3.0.3/drivers/tty/serial/kgdboc.c linux-3.0.3/drivers/tty/serial/kgdboc.c
32702 --- linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32703 +++ linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32704 @@ -23,8 +23,9 @@
32705 #define MAX_CONFIG_LEN 40
32706
32707 static struct kgdb_io kgdboc_io_ops;
32708 +static struct kgdb_io kgdboc_io_ops_console;
32709
32710 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32711 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32712 static int configured = -1;
32713
32714 static char config[MAX_CONFIG_LEN];
32715 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32716 kgdboc_unregister_kbd();
32717 if (configured == 1)
32718 kgdb_unregister_io_module(&kgdboc_io_ops);
32719 + else if (configured == 2)
32720 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32721 }
32722
32723 static int configure_kgdboc(void)
32724 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32725 int err;
32726 char *cptr = config;
32727 struct console *cons;
32728 + int is_console = 0;
32729
32730 err = kgdboc_option_setup(config);
32731 if (err || !strlen(config) || isspace(config[0]))
32732 goto noconfig;
32733
32734 err = -ENODEV;
32735 - kgdboc_io_ops.is_console = 0;
32736 kgdb_tty_driver = NULL;
32737
32738 kgdboc_use_kms = 0;
32739 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32740 int idx;
32741 if (cons->device && cons->device(cons, &idx) == p &&
32742 idx == tty_line) {
32743 - kgdboc_io_ops.is_console = 1;
32744 + is_console = 1;
32745 break;
32746 }
32747 cons = cons->next;
32748 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32749 kgdb_tty_line = tty_line;
32750
32751 do_register:
32752 - err = kgdb_register_io_module(&kgdboc_io_ops);
32753 + if (is_console) {
32754 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
32755 + configured = 2;
32756 + } else {
32757 + err = kgdb_register_io_module(&kgdboc_io_ops);
32758 + configured = 1;
32759 + }
32760 if (err)
32761 goto noconfig;
32762
32763 - configured = 1;
32764 -
32765 return 0;
32766
32767 noconfig:
32768 @@ -212,7 +219,7 @@ noconfig:
32769 static int __init init_kgdboc(void)
32770 {
32771 /* Already configured? */
32772 - if (configured == 1)
32773 + if (configured >= 1)
32774 return 0;
32775
32776 return configure_kgdboc();
32777 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32778 if (config[len - 1] == '\n')
32779 config[len - 1] = '\0';
32780
32781 - if (configured == 1)
32782 + if (configured >= 1)
32783 cleanup_kgdboc();
32784
32785 /* Go and configure with the new params. */
32786 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32787 .post_exception = kgdboc_post_exp_handler,
32788 };
32789
32790 +static struct kgdb_io kgdboc_io_ops_console = {
32791 + .name = "kgdboc",
32792 + .read_char = kgdboc_get_char,
32793 + .write_char = kgdboc_put_char,
32794 + .pre_exception = kgdboc_pre_exp_handler,
32795 + .post_exception = kgdboc_post_exp_handler,
32796 + .is_console = 1
32797 +};
32798 +
32799 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32800 /* This is only available if kgdboc is a built in for early debugging */
32801 static int __init kgdboc_early_init(char *opt)
32802 diff -urNp linux-3.0.3/drivers/tty/serial/mrst_max3110.c linux-3.0.3/drivers/tty/serial/mrst_max3110.c
32803 --- linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
32804 +++ linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
32805 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
32806 int loop = 1, num, total = 0;
32807 u8 recv_buf[512], *pbuf;
32808
32809 + pax_track_stack();
32810 +
32811 pbuf = recv_buf;
32812 do {
32813 num = max3110_read_multi(max, pbuf);
32814 diff -urNp linux-3.0.3/drivers/tty/tty_io.c linux-3.0.3/drivers/tty/tty_io.c
32815 --- linux-3.0.3/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
32816 +++ linux-3.0.3/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
32817 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32818
32819 void tty_default_fops(struct file_operations *fops)
32820 {
32821 - *fops = tty_fops;
32822 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32823 }
32824
32825 /*
32826 diff -urNp linux-3.0.3/drivers/tty/tty_ldisc.c linux-3.0.3/drivers/tty/tty_ldisc.c
32827 --- linux-3.0.3/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
32828 +++ linux-3.0.3/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
32829 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
32830 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32831 struct tty_ldisc_ops *ldo = ld->ops;
32832
32833 - ldo->refcount--;
32834 + atomic_dec(&ldo->refcount);
32835 module_put(ldo->owner);
32836 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32837
32838 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
32839 spin_lock_irqsave(&tty_ldisc_lock, flags);
32840 tty_ldiscs[disc] = new_ldisc;
32841 new_ldisc->num = disc;
32842 - new_ldisc->refcount = 0;
32843 + atomic_set(&new_ldisc->refcount, 0);
32844 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32845
32846 return ret;
32847 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32848 return -EINVAL;
32849
32850 spin_lock_irqsave(&tty_ldisc_lock, flags);
32851 - if (tty_ldiscs[disc]->refcount)
32852 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32853 ret = -EBUSY;
32854 else
32855 tty_ldiscs[disc] = NULL;
32856 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
32857 if (ldops) {
32858 ret = ERR_PTR(-EAGAIN);
32859 if (try_module_get(ldops->owner)) {
32860 - ldops->refcount++;
32861 + atomic_inc(&ldops->refcount);
32862 ret = ldops;
32863 }
32864 }
32865 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
32866 unsigned long flags;
32867
32868 spin_lock_irqsave(&tty_ldisc_lock, flags);
32869 - ldops->refcount--;
32870 + atomic_dec(&ldops->refcount);
32871 module_put(ldops->owner);
32872 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32873 }
32874 diff -urNp linux-3.0.3/drivers/tty/vt/keyboard.c linux-3.0.3/drivers/tty/vt/keyboard.c
32875 --- linux-3.0.3/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
32876 +++ linux-3.0.3/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
32877 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
32878 kbd->kbdmode == VC_OFF) &&
32879 value != KVAL(K_SAK))
32880 return; /* SAK is allowed even in raw mode */
32881 +
32882 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32883 + {
32884 + void *func = fn_handler[value];
32885 + if (func == fn_show_state || func == fn_show_ptregs ||
32886 + func == fn_show_mem)
32887 + return;
32888 + }
32889 +#endif
32890 +
32891 fn_handler[value](vc);
32892 }
32893
32894 diff -urNp linux-3.0.3/drivers/tty/vt/vt.c linux-3.0.3/drivers/tty/vt/vt.c
32895 --- linux-3.0.3/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
32896 +++ linux-3.0.3/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
32897 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
32898
32899 static void notify_write(struct vc_data *vc, unsigned int unicode)
32900 {
32901 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32902 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32903 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32904 }
32905
32906 diff -urNp linux-3.0.3/drivers/tty/vt/vt_ioctl.c linux-3.0.3/drivers/tty/vt/vt_ioctl.c
32907 --- linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
32908 +++ linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
32909 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
32910 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32911 return -EFAULT;
32912
32913 - if (!capable(CAP_SYS_TTY_CONFIG))
32914 - perm = 0;
32915 -
32916 switch (cmd) {
32917 case KDGKBENT:
32918 key_map = key_maps[s];
32919 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
32920 val = (i ? K_HOLE : K_NOSUCHMAP);
32921 return put_user(val, &user_kbe->kb_value);
32922 case KDSKBENT:
32923 + if (!capable(CAP_SYS_TTY_CONFIG))
32924 + perm = 0;
32925 +
32926 if (!perm)
32927 return -EPERM;
32928 if (!i && v == K_NOSUCHMAP) {
32929 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
32930 int i, j, k;
32931 int ret;
32932
32933 - if (!capable(CAP_SYS_TTY_CONFIG))
32934 - perm = 0;
32935 -
32936 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32937 if (!kbs) {
32938 ret = -ENOMEM;
32939 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
32940 kfree(kbs);
32941 return ((p && *p) ? -EOVERFLOW : 0);
32942 case KDSKBSENT:
32943 + if (!capable(CAP_SYS_TTY_CONFIG))
32944 + perm = 0;
32945 +
32946 if (!perm) {
32947 ret = -EPERM;
32948 goto reterr;
32949 diff -urNp linux-3.0.3/drivers/uio/uio.c linux-3.0.3/drivers/uio/uio.c
32950 --- linux-3.0.3/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
32951 +++ linux-3.0.3/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
32952 @@ -25,6 +25,7 @@
32953 #include <linux/kobject.h>
32954 #include <linux/cdev.h>
32955 #include <linux/uio_driver.h>
32956 +#include <asm/local.h>
32957
32958 #define UIO_MAX_DEVICES (1U << MINORBITS)
32959
32960 @@ -32,10 +33,10 @@ struct uio_device {
32961 struct module *owner;
32962 struct device *dev;
32963 int minor;
32964 - atomic_t event;
32965 + atomic_unchecked_t event;
32966 struct fasync_struct *async_queue;
32967 wait_queue_head_t wait;
32968 - int vma_count;
32969 + local_t vma_count;
32970 struct uio_info *info;
32971 struct kobject *map_dir;
32972 struct kobject *portio_dir;
32973 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
32974 struct device_attribute *attr, char *buf)
32975 {
32976 struct uio_device *idev = dev_get_drvdata(dev);
32977 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
32978 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
32979 }
32980
32981 static struct device_attribute uio_class_attributes[] = {
32982 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
32983 {
32984 struct uio_device *idev = info->uio_dev;
32985
32986 - atomic_inc(&idev->event);
32987 + atomic_inc_unchecked(&idev->event);
32988 wake_up_interruptible(&idev->wait);
32989 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
32990 }
32991 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
32992 }
32993
32994 listener->dev = idev;
32995 - listener->event_count = atomic_read(&idev->event);
32996 + listener->event_count = atomic_read_unchecked(&idev->event);
32997 filep->private_data = listener;
32998
32999 if (idev->info->open) {
33000 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33001 return -EIO;
33002
33003 poll_wait(filep, &idev->wait, wait);
33004 - if (listener->event_count != atomic_read(&idev->event))
33005 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33006 return POLLIN | POLLRDNORM;
33007 return 0;
33008 }
33009 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33010 do {
33011 set_current_state(TASK_INTERRUPTIBLE);
33012
33013 - event_count = atomic_read(&idev->event);
33014 + event_count = atomic_read_unchecked(&idev->event);
33015 if (event_count != listener->event_count) {
33016 if (copy_to_user(buf, &event_count, count))
33017 retval = -EFAULT;
33018 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33019 static void uio_vma_open(struct vm_area_struct *vma)
33020 {
33021 struct uio_device *idev = vma->vm_private_data;
33022 - idev->vma_count++;
33023 + local_inc(&idev->vma_count);
33024 }
33025
33026 static void uio_vma_close(struct vm_area_struct *vma)
33027 {
33028 struct uio_device *idev = vma->vm_private_data;
33029 - idev->vma_count--;
33030 + local_dec(&idev->vma_count);
33031 }
33032
33033 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33034 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33035 idev->owner = owner;
33036 idev->info = info;
33037 init_waitqueue_head(&idev->wait);
33038 - atomic_set(&idev->event, 0);
33039 + atomic_set_unchecked(&idev->event, 0);
33040
33041 ret = uio_get_minor(idev);
33042 if (ret)
33043 diff -urNp linux-3.0.3/drivers/usb/atm/cxacru.c linux-3.0.3/drivers/usb/atm/cxacru.c
33044 --- linux-3.0.3/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33045 +++ linux-3.0.3/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33046 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33047 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33048 if (ret < 2)
33049 return -EINVAL;
33050 - if (index < 0 || index > 0x7f)
33051 + if (index > 0x7f)
33052 return -EINVAL;
33053 pos += tmp;
33054
33055 diff -urNp linux-3.0.3/drivers/usb/atm/usbatm.c linux-3.0.3/drivers/usb/atm/usbatm.c
33056 --- linux-3.0.3/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33057 +++ linux-3.0.3/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33058 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33059 if (printk_ratelimit())
33060 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33061 __func__, vpi, vci);
33062 - atomic_inc(&vcc->stats->rx_err);
33063 + atomic_inc_unchecked(&vcc->stats->rx_err);
33064 return;
33065 }
33066
33067 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33068 if (length > ATM_MAX_AAL5_PDU) {
33069 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33070 __func__, length, vcc);
33071 - atomic_inc(&vcc->stats->rx_err);
33072 + atomic_inc_unchecked(&vcc->stats->rx_err);
33073 goto out;
33074 }
33075
33076 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33077 if (sarb->len < pdu_length) {
33078 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33079 __func__, pdu_length, sarb->len, vcc);
33080 - atomic_inc(&vcc->stats->rx_err);
33081 + atomic_inc_unchecked(&vcc->stats->rx_err);
33082 goto out;
33083 }
33084
33085 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33086 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33087 __func__, vcc);
33088 - atomic_inc(&vcc->stats->rx_err);
33089 + atomic_inc_unchecked(&vcc->stats->rx_err);
33090 goto out;
33091 }
33092
33093 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33094 if (printk_ratelimit())
33095 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33096 __func__, length);
33097 - atomic_inc(&vcc->stats->rx_drop);
33098 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33099 goto out;
33100 }
33101
33102 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33103
33104 vcc->push(vcc, skb);
33105
33106 - atomic_inc(&vcc->stats->rx);
33107 + atomic_inc_unchecked(&vcc->stats->rx);
33108 out:
33109 skb_trim(sarb, 0);
33110 }
33111 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33112 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33113
33114 usbatm_pop(vcc, skb);
33115 - atomic_inc(&vcc->stats->tx);
33116 + atomic_inc_unchecked(&vcc->stats->tx);
33117
33118 skb = skb_dequeue(&instance->sndqueue);
33119 }
33120 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33121 if (!left--)
33122 return sprintf(page,
33123 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33124 - atomic_read(&atm_dev->stats.aal5.tx),
33125 - atomic_read(&atm_dev->stats.aal5.tx_err),
33126 - atomic_read(&atm_dev->stats.aal5.rx),
33127 - atomic_read(&atm_dev->stats.aal5.rx_err),
33128 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33129 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33130 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33131 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33132 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33133 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33134
33135 if (!left--) {
33136 if (instance->disconnected)
33137 diff -urNp linux-3.0.3/drivers/usb/core/devices.c linux-3.0.3/drivers/usb/core/devices.c
33138 --- linux-3.0.3/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33139 +++ linux-3.0.3/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33140 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33141 * time it gets called.
33142 */
33143 static struct device_connect_event {
33144 - atomic_t count;
33145 + atomic_unchecked_t count;
33146 wait_queue_head_t wait;
33147 } device_event = {
33148 .count = ATOMIC_INIT(1),
33149 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33150
33151 void usbfs_conn_disc_event(void)
33152 {
33153 - atomic_add(2, &device_event.count);
33154 + atomic_add_unchecked(2, &device_event.count);
33155 wake_up(&device_event.wait);
33156 }
33157
33158 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33159
33160 poll_wait(file, &device_event.wait, wait);
33161
33162 - event_count = atomic_read(&device_event.count);
33163 + event_count = atomic_read_unchecked(&device_event.count);
33164 if (file->f_version != event_count) {
33165 file->f_version = event_count;
33166 return POLLIN | POLLRDNORM;
33167 diff -urNp linux-3.0.3/drivers/usb/core/message.c linux-3.0.3/drivers/usb/core/message.c
33168 --- linux-3.0.3/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33169 +++ linux-3.0.3/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33170 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33171 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33172 if (buf) {
33173 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33174 - if (len > 0) {
33175 - smallbuf = kmalloc(++len, GFP_NOIO);
33176 + if (len++ > 0) {
33177 + smallbuf = kmalloc(len, GFP_NOIO);
33178 if (!smallbuf)
33179 return buf;
33180 memcpy(smallbuf, buf, len);
33181 diff -urNp linux-3.0.3/drivers/usb/early/ehci-dbgp.c linux-3.0.3/drivers/usb/early/ehci-dbgp.c
33182 --- linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33183 +++ linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33184 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33185
33186 #ifdef CONFIG_KGDB
33187 static struct kgdb_io kgdbdbgp_io_ops;
33188 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33189 +static struct kgdb_io kgdbdbgp_io_ops_console;
33190 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33191 #else
33192 #define dbgp_kgdb_mode (0)
33193 #endif
33194 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33195 .write_char = kgdbdbgp_write_char,
33196 };
33197
33198 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33199 + .name = "kgdbdbgp",
33200 + .read_char = kgdbdbgp_read_char,
33201 + .write_char = kgdbdbgp_write_char,
33202 + .is_console = 1
33203 +};
33204 +
33205 static int kgdbdbgp_wait_time;
33206
33207 static int __init kgdbdbgp_parse_config(char *str)
33208 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33209 ptr++;
33210 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33211 }
33212 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33213 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33214 + if (early_dbgp_console.index != -1)
33215 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33216 + else
33217 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33218
33219 return 0;
33220 }
33221 diff -urNp linux-3.0.3/drivers/usb/host/xhci-mem.c linux-3.0.3/drivers/usb/host/xhci-mem.c
33222 --- linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33223 +++ linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33224 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33225 unsigned int num_tests;
33226 int i, ret;
33227
33228 + pax_track_stack();
33229 +
33230 num_tests = ARRAY_SIZE(simple_test_vector);
33231 for (i = 0; i < num_tests; i++) {
33232 ret = xhci_test_trb_in_td(xhci,
33233 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-hc.h linux-3.0.3/drivers/usb/wusbcore/wa-hc.h
33234 --- linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33235 +++ linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33236 @@ -192,7 +192,7 @@ struct wahc {
33237 struct list_head xfer_delayed_list;
33238 spinlock_t xfer_list_lock;
33239 struct work_struct xfer_work;
33240 - atomic_t xfer_id_count;
33241 + atomic_unchecked_t xfer_id_count;
33242 };
33243
33244
33245 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33246 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33247 spin_lock_init(&wa->xfer_list_lock);
33248 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33249 - atomic_set(&wa->xfer_id_count, 1);
33250 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33251 }
33252
33253 /**
33254 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c
33255 --- linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33256 +++ linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33257 @@ -294,7 +294,7 @@ out:
33258 */
33259 static void wa_xfer_id_init(struct wa_xfer *xfer)
33260 {
33261 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33262 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33263 }
33264
33265 /*
33266 diff -urNp linux-3.0.3/drivers/vhost/vhost.c linux-3.0.3/drivers/vhost/vhost.c
33267 --- linux-3.0.3/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33268 +++ linux-3.0.3/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33269 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33270 return get_user(vq->last_used_idx, &used->idx);
33271 }
33272
33273 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33274 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33275 {
33276 struct file *eventfp, *filep = NULL,
33277 *pollstart = NULL, *pollstop = NULL;
33278 diff -urNp linux-3.0.3/drivers/video/fbcmap.c linux-3.0.3/drivers/video/fbcmap.c
33279 --- linux-3.0.3/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33280 +++ linux-3.0.3/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33281 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33282 rc = -ENODEV;
33283 goto out;
33284 }
33285 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33286 - !info->fbops->fb_setcmap)) {
33287 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33288 rc = -EINVAL;
33289 goto out1;
33290 }
33291 diff -urNp linux-3.0.3/drivers/video/fbmem.c linux-3.0.3/drivers/video/fbmem.c
33292 --- linux-3.0.3/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33293 +++ linux-3.0.3/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33294 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33295 image->dx += image->width + 8;
33296 }
33297 } else if (rotate == FB_ROTATE_UD) {
33298 - for (x = 0; x < num && image->dx >= 0; x++) {
33299 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33300 info->fbops->fb_imageblit(info, image);
33301 image->dx -= image->width + 8;
33302 }
33303 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33304 image->dy += image->height + 8;
33305 }
33306 } else if (rotate == FB_ROTATE_CCW) {
33307 - for (x = 0; x < num && image->dy >= 0; x++) {
33308 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33309 info->fbops->fb_imageblit(info, image);
33310 image->dy -= image->height + 8;
33311 }
33312 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33313 int flags = info->flags;
33314 int ret = 0;
33315
33316 + pax_track_stack();
33317 +
33318 if (var->activate & FB_ACTIVATE_INV_MODE) {
33319 struct fb_videomode mode1, mode2;
33320
33321 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33322 void __user *argp = (void __user *)arg;
33323 long ret = 0;
33324
33325 + pax_track_stack();
33326 +
33327 switch (cmd) {
33328 case FBIOGET_VSCREENINFO:
33329 if (!lock_fb_info(info))
33330 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33331 return -EFAULT;
33332 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33333 return -EINVAL;
33334 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33335 + if (con2fb.framebuffer >= FB_MAX)
33336 return -EINVAL;
33337 if (!registered_fb[con2fb.framebuffer])
33338 request_module("fb%d", con2fb.framebuffer);
33339 diff -urNp linux-3.0.3/drivers/video/i810/i810_accel.c linux-3.0.3/drivers/video/i810/i810_accel.c
33340 --- linux-3.0.3/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33341 +++ linux-3.0.3/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33342 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33343 }
33344 }
33345 printk("ringbuffer lockup!!!\n");
33346 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33347 i810_report_error(mmio);
33348 par->dev_flags |= LOCKUP;
33349 info->pixmap.scan_align = 1;
33350 diff -urNp linux-3.0.3/drivers/video/udlfb.c linux-3.0.3/drivers/video/udlfb.c
33351 --- linux-3.0.3/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33352 +++ linux-3.0.3/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33353 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33354 dlfb_urb_completion(urb);
33355
33356 error:
33357 - atomic_add(bytes_sent, &dev->bytes_sent);
33358 - atomic_add(bytes_identical, &dev->bytes_identical);
33359 - atomic_add(width*height*2, &dev->bytes_rendered);
33360 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33361 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33362 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33363 end_cycles = get_cycles();
33364 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33365 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33366 >> 10)), /* Kcycles */
33367 &dev->cpu_kcycles_used);
33368
33369 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33370 dlfb_urb_completion(urb);
33371
33372 error:
33373 - atomic_add(bytes_sent, &dev->bytes_sent);
33374 - atomic_add(bytes_identical, &dev->bytes_identical);
33375 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33376 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33377 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33378 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33379 end_cycles = get_cycles();
33380 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33381 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33382 >> 10)), /* Kcycles */
33383 &dev->cpu_kcycles_used);
33384 }
33385 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33386 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33387 struct dlfb_data *dev = fb_info->par;
33388 return snprintf(buf, PAGE_SIZE, "%u\n",
33389 - atomic_read(&dev->bytes_rendered));
33390 + atomic_read_unchecked(&dev->bytes_rendered));
33391 }
33392
33393 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33394 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33395 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33396 struct dlfb_data *dev = fb_info->par;
33397 return snprintf(buf, PAGE_SIZE, "%u\n",
33398 - atomic_read(&dev->bytes_identical));
33399 + atomic_read_unchecked(&dev->bytes_identical));
33400 }
33401
33402 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33403 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33404 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33405 struct dlfb_data *dev = fb_info->par;
33406 return snprintf(buf, PAGE_SIZE, "%u\n",
33407 - atomic_read(&dev->bytes_sent));
33408 + atomic_read_unchecked(&dev->bytes_sent));
33409 }
33410
33411 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33412 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33413 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33414 struct dlfb_data *dev = fb_info->par;
33415 return snprintf(buf, PAGE_SIZE, "%u\n",
33416 - atomic_read(&dev->cpu_kcycles_used));
33417 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33418 }
33419
33420 static ssize_t edid_show(
33421 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33422 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33423 struct dlfb_data *dev = fb_info->par;
33424
33425 - atomic_set(&dev->bytes_rendered, 0);
33426 - atomic_set(&dev->bytes_identical, 0);
33427 - atomic_set(&dev->bytes_sent, 0);
33428 - atomic_set(&dev->cpu_kcycles_used, 0);
33429 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33430 + atomic_set_unchecked(&dev->bytes_identical, 0);
33431 + atomic_set_unchecked(&dev->bytes_sent, 0);
33432 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33433
33434 return count;
33435 }
33436 diff -urNp linux-3.0.3/drivers/video/uvesafb.c linux-3.0.3/drivers/video/uvesafb.c
33437 --- linux-3.0.3/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33438 +++ linux-3.0.3/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33439 @@ -19,6 +19,7 @@
33440 #include <linux/io.h>
33441 #include <linux/mutex.h>
33442 #include <linux/slab.h>
33443 +#include <linux/moduleloader.h>
33444 #include <video/edid.h>
33445 #include <video/uvesafb.h>
33446 #ifdef CONFIG_X86
33447 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33448 NULL,
33449 };
33450
33451 - return call_usermodehelper(v86d_path, argv, envp, 1);
33452 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33453 }
33454
33455 /*
33456 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33457 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33458 par->pmi_setpal = par->ypan = 0;
33459 } else {
33460 +
33461 +#ifdef CONFIG_PAX_KERNEXEC
33462 +#ifdef CONFIG_MODULES
33463 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33464 +#endif
33465 + if (!par->pmi_code) {
33466 + par->pmi_setpal = par->ypan = 0;
33467 + return 0;
33468 + }
33469 +#endif
33470 +
33471 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33472 + task->t.regs.edi);
33473 +
33474 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33475 + pax_open_kernel();
33476 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33477 + pax_close_kernel();
33478 +
33479 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33480 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33481 +#else
33482 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33483 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33484 +#endif
33485 +
33486 printk(KERN_INFO "uvesafb: protected mode interface info at "
33487 "%04x:%04x\n",
33488 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33489 @@ -1821,6 +1844,11 @@ out:
33490 if (par->vbe_modes)
33491 kfree(par->vbe_modes);
33492
33493 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33494 + if (par->pmi_code)
33495 + module_free_exec(NULL, par->pmi_code);
33496 +#endif
33497 +
33498 framebuffer_release(info);
33499 return err;
33500 }
33501 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33502 kfree(par->vbe_state_orig);
33503 if (par->vbe_state_saved)
33504 kfree(par->vbe_state_saved);
33505 +
33506 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33507 + if (par->pmi_code)
33508 + module_free_exec(NULL, par->pmi_code);
33509 +#endif
33510 +
33511 }
33512
33513 framebuffer_release(info);
33514 diff -urNp linux-3.0.3/drivers/video/vesafb.c linux-3.0.3/drivers/video/vesafb.c
33515 --- linux-3.0.3/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33516 +++ linux-3.0.3/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33517 @@ -9,6 +9,7 @@
33518 */
33519
33520 #include <linux/module.h>
33521 +#include <linux/moduleloader.h>
33522 #include <linux/kernel.h>
33523 #include <linux/errno.h>
33524 #include <linux/string.h>
33525 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33526 static int vram_total __initdata; /* Set total amount of memory */
33527 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33528 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33529 -static void (*pmi_start)(void) __read_mostly;
33530 -static void (*pmi_pal) (void) __read_mostly;
33531 +static void (*pmi_start)(void) __read_only;
33532 +static void (*pmi_pal) (void) __read_only;
33533 static int depth __read_mostly;
33534 static int vga_compat __read_mostly;
33535 /* --------------------------------------------------------------------- */
33536 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33537 unsigned int size_vmode;
33538 unsigned int size_remap;
33539 unsigned int size_total;
33540 + void *pmi_code = NULL;
33541
33542 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33543 return -ENODEV;
33544 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33545 size_remap = size_total;
33546 vesafb_fix.smem_len = size_remap;
33547
33548 -#ifndef __i386__
33549 - screen_info.vesapm_seg = 0;
33550 -#endif
33551 -
33552 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33553 printk(KERN_WARNING
33554 "vesafb: cannot reserve video memory at 0x%lx\n",
33555 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33556 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33557 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33558
33559 +#ifdef __i386__
33560 +
33561 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33562 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33563 + if (!pmi_code)
33564 +#elif !defined(CONFIG_PAX_KERNEXEC)
33565 + if (0)
33566 +#endif
33567 +
33568 +#endif
33569 + screen_info.vesapm_seg = 0;
33570 +
33571 if (screen_info.vesapm_seg) {
33572 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33573 - screen_info.vesapm_seg,screen_info.vesapm_off);
33574 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33575 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33576 }
33577
33578 if (screen_info.vesapm_seg < 0xc000)
33579 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33580
33581 if (ypan || pmi_setpal) {
33582 unsigned short *pmi_base;
33583 +
33584 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33585 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33586 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33587 +
33588 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33589 + pax_open_kernel();
33590 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33591 +#else
33592 + pmi_code = pmi_base;
33593 +#endif
33594 +
33595 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33596 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33597 +
33598 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33599 + pmi_start = ktva_ktla(pmi_start);
33600 + pmi_pal = ktva_ktla(pmi_pal);
33601 + pax_close_kernel();
33602 +#endif
33603 +
33604 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33605 if (pmi_base[3]) {
33606 printk(KERN_INFO "vesafb: pmi: ports = ");
33607 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33608 info->node, info->fix.id);
33609 return 0;
33610 err:
33611 +
33612 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33613 + module_free_exec(NULL, pmi_code);
33614 +#endif
33615 +
33616 if (info->screen_base)
33617 iounmap(info->screen_base);
33618 framebuffer_release(info);
33619 diff -urNp linux-3.0.3/drivers/video/via/via_clock.h linux-3.0.3/drivers/video/via/via_clock.h
33620 --- linux-3.0.3/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33621 +++ linux-3.0.3/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33622 @@ -56,7 +56,7 @@ struct via_clock {
33623
33624 void (*set_engine_pll_state)(u8 state);
33625 void (*set_engine_pll)(struct via_pll_config config);
33626 -};
33627 +} __no_const;
33628
33629
33630 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33631 diff -urNp linux-3.0.3/drivers/virtio/virtio_balloon.c linux-3.0.3/drivers/virtio/virtio_balloon.c
33632 --- linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33633 +++ linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33634 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33635 struct sysinfo i;
33636 int idx = 0;
33637
33638 + pax_track_stack();
33639 +
33640 all_vm_events(events);
33641 si_meminfo(&i);
33642
33643 diff -urNp linux-3.0.3/fs/9p/vfs_inode.c linux-3.0.3/fs/9p/vfs_inode.c
33644 --- linux-3.0.3/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33645 +++ linux-3.0.3/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33646 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33647 void
33648 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33649 {
33650 - char *s = nd_get_link(nd);
33651 + const char *s = nd_get_link(nd);
33652
33653 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33654 IS_ERR(s) ? "<error>" : s);
33655 diff -urNp linux-3.0.3/fs/aio.c linux-3.0.3/fs/aio.c
33656 --- linux-3.0.3/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33657 +++ linux-3.0.3/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33658 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33659 size += sizeof(struct io_event) * nr_events;
33660 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33661
33662 - if (nr_pages < 0)
33663 + if (nr_pages <= 0)
33664 return -EINVAL;
33665
33666 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33667 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33668 struct aio_timeout to;
33669 int retry = 0;
33670
33671 + pax_track_stack();
33672 +
33673 /* needed to zero any padding within an entry (there shouldn't be
33674 * any, but C is fun!
33675 */
33676 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33677 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33678 {
33679 ssize_t ret;
33680 + struct iovec iovstack;
33681
33682 #ifdef CONFIG_COMPAT
33683 if (compat)
33684 ret = compat_rw_copy_check_uvector(type,
33685 (struct compat_iovec __user *)kiocb->ki_buf,
33686 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33687 + kiocb->ki_nbytes, 1, &iovstack,
33688 &kiocb->ki_iovec);
33689 else
33690 #endif
33691 ret = rw_copy_check_uvector(type,
33692 (struct iovec __user *)kiocb->ki_buf,
33693 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33694 + kiocb->ki_nbytes, 1, &iovstack,
33695 &kiocb->ki_iovec);
33696 if (ret < 0)
33697 goto out;
33698
33699 + if (kiocb->ki_iovec == &iovstack) {
33700 + kiocb->ki_inline_vec = iovstack;
33701 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33702 + }
33703 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33704 kiocb->ki_cur_seg = 0;
33705 /* ki_nbytes/left now reflect bytes instead of segs */
33706 diff -urNp linux-3.0.3/fs/attr.c linux-3.0.3/fs/attr.c
33707 --- linux-3.0.3/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33708 +++ linux-3.0.3/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33709 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33710 unsigned long limit;
33711
33712 limit = rlimit(RLIMIT_FSIZE);
33713 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33714 if (limit != RLIM_INFINITY && offset > limit)
33715 goto out_sig;
33716 if (offset > inode->i_sb->s_maxbytes)
33717 diff -urNp linux-3.0.3/fs/befs/linuxvfs.c linux-3.0.3/fs/befs/linuxvfs.c
33718 --- linux-3.0.3/fs/befs/linuxvfs.c 2011-07-21 22:17:23.000000000 -0400
33719 +++ linux-3.0.3/fs/befs/linuxvfs.c 2011-08-23 21:47:56.000000000 -0400
33720 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33721 {
33722 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33723 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33724 - char *link = nd_get_link(nd);
33725 + const char *link = nd_get_link(nd);
33726 if (!IS_ERR(link))
33727 kfree(link);
33728 }
33729 diff -urNp linux-3.0.3/fs/binfmt_aout.c linux-3.0.3/fs/binfmt_aout.c
33730 --- linux-3.0.3/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33731 +++ linux-3.0.3/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33732 @@ -16,6 +16,7 @@
33733 #include <linux/string.h>
33734 #include <linux/fs.h>
33735 #include <linux/file.h>
33736 +#include <linux/security.h>
33737 #include <linux/stat.h>
33738 #include <linux/fcntl.h>
33739 #include <linux/ptrace.h>
33740 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33741 #endif
33742 # define START_STACK(u) ((void __user *)u.start_stack)
33743
33744 + memset(&dump, 0, sizeof(dump));
33745 +
33746 fs = get_fs();
33747 set_fs(KERNEL_DS);
33748 has_dumped = 1;
33749 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33750
33751 /* If the size of the dump file exceeds the rlimit, then see what would happen
33752 if we wrote the stack, but not the data area. */
33753 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33754 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33755 dump.u_dsize = 0;
33756
33757 /* Make sure we have enough room to write the stack and data areas. */
33758 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33759 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33760 dump.u_ssize = 0;
33761
33762 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33763 rlim = rlimit(RLIMIT_DATA);
33764 if (rlim >= RLIM_INFINITY)
33765 rlim = ~0;
33766 +
33767 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33768 if (ex.a_data + ex.a_bss > rlim)
33769 return -ENOMEM;
33770
33771 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33772 install_exec_creds(bprm);
33773 current->flags &= ~PF_FORKNOEXEC;
33774
33775 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33776 + current->mm->pax_flags = 0UL;
33777 +#endif
33778 +
33779 +#ifdef CONFIG_PAX_PAGEEXEC
33780 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33781 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33782 +
33783 +#ifdef CONFIG_PAX_EMUTRAMP
33784 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33785 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33786 +#endif
33787 +
33788 +#ifdef CONFIG_PAX_MPROTECT
33789 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33790 + current->mm->pax_flags |= MF_PAX_MPROTECT;
33791 +#endif
33792 +
33793 + }
33794 +#endif
33795 +
33796 if (N_MAGIC(ex) == OMAGIC) {
33797 unsigned long text_addr, map_size;
33798 loff_t pos;
33799 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
33800
33801 down_write(&current->mm->mmap_sem);
33802 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
33803 - PROT_READ | PROT_WRITE | PROT_EXEC,
33804 + PROT_READ | PROT_WRITE,
33805 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
33806 fd_offset + ex.a_text);
33807 up_write(&current->mm->mmap_sem);
33808 diff -urNp linux-3.0.3/fs/binfmt_elf.c linux-3.0.3/fs/binfmt_elf.c
33809 --- linux-3.0.3/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
33810 +++ linux-3.0.3/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
33811 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
33812 #define elf_core_dump NULL
33813 #endif
33814
33815 +#ifdef CONFIG_PAX_MPROTECT
33816 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
33817 +#endif
33818 +
33819 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
33820 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
33821 #else
33822 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
33823 .load_binary = load_elf_binary,
33824 .load_shlib = load_elf_library,
33825 .core_dump = elf_core_dump,
33826 +
33827 +#ifdef CONFIG_PAX_MPROTECT
33828 + .handle_mprotect= elf_handle_mprotect,
33829 +#endif
33830 +
33831 .min_coredump = ELF_EXEC_PAGESIZE,
33832 };
33833
33834 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
33835
33836 static int set_brk(unsigned long start, unsigned long end)
33837 {
33838 + unsigned long e = end;
33839 +
33840 start = ELF_PAGEALIGN(start);
33841 end = ELF_PAGEALIGN(end);
33842 if (end > start) {
33843 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
33844 if (BAD_ADDR(addr))
33845 return addr;
33846 }
33847 - current->mm->start_brk = current->mm->brk = end;
33848 + current->mm->start_brk = current->mm->brk = e;
33849 return 0;
33850 }
33851
33852 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
33853 elf_addr_t __user *u_rand_bytes;
33854 const char *k_platform = ELF_PLATFORM;
33855 const char *k_base_platform = ELF_BASE_PLATFORM;
33856 - unsigned char k_rand_bytes[16];
33857 + u32 k_rand_bytes[4];
33858 int items;
33859 elf_addr_t *elf_info;
33860 int ei_index = 0;
33861 const struct cred *cred = current_cred();
33862 struct vm_area_struct *vma;
33863 + unsigned long saved_auxv[AT_VECTOR_SIZE];
33864 +
33865 + pax_track_stack();
33866
33867 /*
33868 * In some cases (e.g. Hyper-Threading), we want to avoid L1
33869 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
33870 * Generate 16 random bytes for userspace PRNG seeding.
33871 */
33872 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
33873 - u_rand_bytes = (elf_addr_t __user *)
33874 - STACK_ALLOC(p, sizeof(k_rand_bytes));
33875 + srandom32(k_rand_bytes[0] ^ random32());
33876 + srandom32(k_rand_bytes[1] ^ random32());
33877 + srandom32(k_rand_bytes[2] ^ random32());
33878 + srandom32(k_rand_bytes[3] ^ random32());
33879 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
33880 + u_rand_bytes = (elf_addr_t __user *) p;
33881 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
33882 return -EFAULT;
33883
33884 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
33885 return -EFAULT;
33886 current->mm->env_end = p;
33887
33888 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
33889 +
33890 /* Put the elf_info on the stack in the right place. */
33891 sp = (elf_addr_t __user *)envp + 1;
33892 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
33893 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
33894 return -EFAULT;
33895 return 0;
33896 }
33897 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
33898 {
33899 struct elf_phdr *elf_phdata;
33900 struct elf_phdr *eppnt;
33901 - unsigned long load_addr = 0;
33902 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
33903 int load_addr_set = 0;
33904 unsigned long last_bss = 0, elf_bss = 0;
33905 - unsigned long error = ~0UL;
33906 + unsigned long error = -EINVAL;
33907 unsigned long total_size;
33908 int retval, i, size;
33909
33910 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
33911 goto out_close;
33912 }
33913
33914 +#ifdef CONFIG_PAX_SEGMEXEC
33915 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
33916 + pax_task_size = SEGMEXEC_TASK_SIZE;
33917 +#endif
33918 +
33919 eppnt = elf_phdata;
33920 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
33921 if (eppnt->p_type == PT_LOAD) {
33922 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
33923 k = load_addr + eppnt->p_vaddr;
33924 if (BAD_ADDR(k) ||
33925 eppnt->p_filesz > eppnt->p_memsz ||
33926 - eppnt->p_memsz > TASK_SIZE ||
33927 - TASK_SIZE - eppnt->p_memsz < k) {
33928 + eppnt->p_memsz > pax_task_size ||
33929 + pax_task_size - eppnt->p_memsz < k) {
33930 error = -ENOMEM;
33931 goto out_close;
33932 }
33933 @@ -528,6 +553,193 @@ out:
33934 return error;
33935 }
33936
33937 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
33938 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
33939 +{
33940 + unsigned long pax_flags = 0UL;
33941 +
33942 +#ifdef CONFIG_PAX_PAGEEXEC
33943 + if (elf_phdata->p_flags & PF_PAGEEXEC)
33944 + pax_flags |= MF_PAX_PAGEEXEC;
33945 +#endif
33946 +
33947 +#ifdef CONFIG_PAX_SEGMEXEC
33948 + if (elf_phdata->p_flags & PF_SEGMEXEC)
33949 + pax_flags |= MF_PAX_SEGMEXEC;
33950 +#endif
33951 +
33952 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
33953 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
33954 + if ((__supported_pte_mask & _PAGE_NX))
33955 + pax_flags &= ~MF_PAX_SEGMEXEC;
33956 + else
33957 + pax_flags &= ~MF_PAX_PAGEEXEC;
33958 + }
33959 +#endif
33960 +
33961 +#ifdef CONFIG_PAX_EMUTRAMP
33962 + if (elf_phdata->p_flags & PF_EMUTRAMP)
33963 + pax_flags |= MF_PAX_EMUTRAMP;
33964 +#endif
33965 +
33966 +#ifdef CONFIG_PAX_MPROTECT
33967 + if (elf_phdata->p_flags & PF_MPROTECT)
33968 + pax_flags |= MF_PAX_MPROTECT;
33969 +#endif
33970 +
33971 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
33972 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
33973 + pax_flags |= MF_PAX_RANDMMAP;
33974 +#endif
33975 +
33976 + return pax_flags;
33977 +}
33978 +#endif
33979 +
33980 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
33981 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
33982 +{
33983 + unsigned long pax_flags = 0UL;
33984 +
33985 +#ifdef CONFIG_PAX_PAGEEXEC
33986 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
33987 + pax_flags |= MF_PAX_PAGEEXEC;
33988 +#endif
33989 +
33990 +#ifdef CONFIG_PAX_SEGMEXEC
33991 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
33992 + pax_flags |= MF_PAX_SEGMEXEC;
33993 +#endif
33994 +
33995 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
33996 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
33997 + if ((__supported_pte_mask & _PAGE_NX))
33998 + pax_flags &= ~MF_PAX_SEGMEXEC;
33999 + else
34000 + pax_flags &= ~MF_PAX_PAGEEXEC;
34001 + }
34002 +#endif
34003 +
34004 +#ifdef CONFIG_PAX_EMUTRAMP
34005 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34006 + pax_flags |= MF_PAX_EMUTRAMP;
34007 +#endif
34008 +
34009 +#ifdef CONFIG_PAX_MPROTECT
34010 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34011 + pax_flags |= MF_PAX_MPROTECT;
34012 +#endif
34013 +
34014 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34015 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34016 + pax_flags |= MF_PAX_RANDMMAP;
34017 +#endif
34018 +
34019 + return pax_flags;
34020 +}
34021 +#endif
34022 +
34023 +#ifdef CONFIG_PAX_EI_PAX
34024 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34025 +{
34026 + unsigned long pax_flags = 0UL;
34027 +
34028 +#ifdef CONFIG_PAX_PAGEEXEC
34029 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34030 + pax_flags |= MF_PAX_PAGEEXEC;
34031 +#endif
34032 +
34033 +#ifdef CONFIG_PAX_SEGMEXEC
34034 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34035 + pax_flags |= MF_PAX_SEGMEXEC;
34036 +#endif
34037 +
34038 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34039 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34040 + if ((__supported_pte_mask & _PAGE_NX))
34041 + pax_flags &= ~MF_PAX_SEGMEXEC;
34042 + else
34043 + pax_flags &= ~MF_PAX_PAGEEXEC;
34044 + }
34045 +#endif
34046 +
34047 +#ifdef CONFIG_PAX_EMUTRAMP
34048 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34049 + pax_flags |= MF_PAX_EMUTRAMP;
34050 +#endif
34051 +
34052 +#ifdef CONFIG_PAX_MPROTECT
34053 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34054 + pax_flags |= MF_PAX_MPROTECT;
34055 +#endif
34056 +
34057 +#ifdef CONFIG_PAX_ASLR
34058 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34059 + pax_flags |= MF_PAX_RANDMMAP;
34060 +#endif
34061 +
34062 + return pax_flags;
34063 +}
34064 +#endif
34065 +
34066 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34067 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34068 +{
34069 + unsigned long pax_flags = 0UL;
34070 +
34071 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34072 + unsigned long i;
34073 + int found_flags = 0;
34074 +#endif
34075 +
34076 +#ifdef CONFIG_PAX_EI_PAX
34077 + pax_flags = pax_parse_ei_pax(elf_ex);
34078 +#endif
34079 +
34080 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34081 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34082 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34083 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34084 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34085 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34086 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34087 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34088 + return -EINVAL;
34089 +
34090 +#ifdef CONFIG_PAX_SOFTMODE
34091 + if (pax_softmode)
34092 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34093 + else
34094 +#endif
34095 +
34096 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34097 + found_flags = 1;
34098 + break;
34099 + }
34100 +#endif
34101 +
34102 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34103 + if (found_flags == 0) {
34104 + struct elf_phdr phdr;
34105 + memset(&phdr, 0, sizeof(phdr));
34106 + phdr.p_flags = PF_NOEMUTRAMP;
34107 +#ifdef CONFIG_PAX_SOFTMODE
34108 + if (pax_softmode)
34109 + pax_flags = pax_parse_softmode(&phdr);
34110 + else
34111 +#endif
34112 + pax_flags = pax_parse_hardmode(&phdr);
34113 + }
34114 +#endif
34115 +
34116 + if (0 > pax_check_flags(&pax_flags))
34117 + return -EINVAL;
34118 +
34119 + current->mm->pax_flags = pax_flags;
34120 + return 0;
34121 +}
34122 +#endif
34123 +
34124 /*
34125 * These are the functions used to load ELF style executables and shared
34126 * libraries. There is no binary dependent code anywhere else.
34127 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34128 {
34129 unsigned int random_variable = 0;
34130
34131 +#ifdef CONFIG_PAX_RANDUSTACK
34132 + if (randomize_va_space)
34133 + return stack_top - current->mm->delta_stack;
34134 +#endif
34135 +
34136 if ((current->flags & PF_RANDOMIZE) &&
34137 !(current->personality & ADDR_NO_RANDOMIZE)) {
34138 random_variable = get_random_int() & STACK_RND_MASK;
34139 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34140 unsigned long load_addr = 0, load_bias = 0;
34141 int load_addr_set = 0;
34142 char * elf_interpreter = NULL;
34143 - unsigned long error;
34144 + unsigned long error = 0;
34145 struct elf_phdr *elf_ppnt, *elf_phdata;
34146 unsigned long elf_bss, elf_brk;
34147 int retval, i;
34148 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34149 unsigned long start_code, end_code, start_data, end_data;
34150 unsigned long reloc_func_desc __maybe_unused = 0;
34151 int executable_stack = EXSTACK_DEFAULT;
34152 - unsigned long def_flags = 0;
34153 struct {
34154 struct elfhdr elf_ex;
34155 struct elfhdr interp_elf_ex;
34156 } *loc;
34157 + unsigned long pax_task_size = TASK_SIZE;
34158
34159 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34160 if (!loc) {
34161 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34162
34163 /* OK, This is the point of no return */
34164 current->flags &= ~PF_FORKNOEXEC;
34165 - current->mm->def_flags = def_flags;
34166 +
34167 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34168 + current->mm->pax_flags = 0UL;
34169 +#endif
34170 +
34171 +#ifdef CONFIG_PAX_DLRESOLVE
34172 + current->mm->call_dl_resolve = 0UL;
34173 +#endif
34174 +
34175 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34176 + current->mm->call_syscall = 0UL;
34177 +#endif
34178 +
34179 +#ifdef CONFIG_PAX_ASLR
34180 + current->mm->delta_mmap = 0UL;
34181 + current->mm->delta_stack = 0UL;
34182 +#endif
34183 +
34184 + current->mm->def_flags = 0;
34185 +
34186 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34187 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34188 + send_sig(SIGKILL, current, 0);
34189 + goto out_free_dentry;
34190 + }
34191 +#endif
34192 +
34193 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34194 + pax_set_initial_flags(bprm);
34195 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34196 + if (pax_set_initial_flags_func)
34197 + (pax_set_initial_flags_func)(bprm);
34198 +#endif
34199 +
34200 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34201 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34202 + current->mm->context.user_cs_limit = PAGE_SIZE;
34203 + current->mm->def_flags |= VM_PAGEEXEC;
34204 + }
34205 +#endif
34206 +
34207 +#ifdef CONFIG_PAX_SEGMEXEC
34208 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34209 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34210 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34211 + pax_task_size = SEGMEXEC_TASK_SIZE;
34212 + current->mm->def_flags |= VM_NOHUGEPAGE;
34213 + }
34214 +#endif
34215 +
34216 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34217 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34218 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34219 + put_cpu();
34220 + }
34221 +#endif
34222
34223 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34224 may depend on the personality. */
34225 SET_PERSONALITY(loc->elf_ex);
34226 +
34227 +#ifdef CONFIG_PAX_ASLR
34228 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34229 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34230 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34231 + }
34232 +#endif
34233 +
34234 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34235 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34236 + executable_stack = EXSTACK_DISABLE_X;
34237 + current->personality &= ~READ_IMPLIES_EXEC;
34238 + } else
34239 +#endif
34240 +
34241 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34242 current->personality |= READ_IMPLIES_EXEC;
34243
34244 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34245 #else
34246 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34247 #endif
34248 +
34249 +#ifdef CONFIG_PAX_RANDMMAP
34250 + /* PaX: randomize base address at the default exe base if requested */
34251 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34252 +#ifdef CONFIG_SPARC64
34253 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34254 +#else
34255 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34256 +#endif
34257 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34258 + elf_flags |= MAP_FIXED;
34259 + }
34260 +#endif
34261 +
34262 }
34263
34264 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34265 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34266 * allowed task size. Note that p_filesz must always be
34267 * <= p_memsz so it is only necessary to check p_memsz.
34268 */
34269 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34270 - elf_ppnt->p_memsz > TASK_SIZE ||
34271 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34272 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34273 + elf_ppnt->p_memsz > pax_task_size ||
34274 + pax_task_size - elf_ppnt->p_memsz < k) {
34275 /* set_brk can never work. Avoid overflows. */
34276 send_sig(SIGKILL, current, 0);
34277 retval = -EINVAL;
34278 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34279 start_data += load_bias;
34280 end_data += load_bias;
34281
34282 +#ifdef CONFIG_PAX_RANDMMAP
34283 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34284 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34285 +#endif
34286 +
34287 /* Calling set_brk effectively mmaps the pages that we need
34288 * for the bss and break sections. We must do this before
34289 * mapping in the interpreter, to make sure it doesn't wind
34290 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34291 goto out_free_dentry;
34292 }
34293 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34294 - send_sig(SIGSEGV, current, 0);
34295 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34296 - goto out_free_dentry;
34297 + /*
34298 + * This bss-zeroing can fail if the ELF
34299 + * file specifies odd protections. So
34300 + * we don't check the return value
34301 + */
34302 }
34303
34304 if (elf_interpreter) {
34305 @@ -1090,7 +1398,7 @@ out:
34306 * Decide what to dump of a segment, part, all or none.
34307 */
34308 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34309 - unsigned long mm_flags)
34310 + unsigned long mm_flags, long signr)
34311 {
34312 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34313
34314 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34315 if (vma->vm_file == NULL)
34316 return 0;
34317
34318 - if (FILTER(MAPPED_PRIVATE))
34319 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34320 goto whole;
34321
34322 /*
34323 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34324 {
34325 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34326 int i = 0;
34327 - do
34328 + do {
34329 i += 2;
34330 - while (auxv[i - 2] != AT_NULL);
34331 + } while (auxv[i - 2] != AT_NULL);
34332 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34333 }
34334
34335 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34336 }
34337
34338 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34339 - unsigned long mm_flags)
34340 + struct coredump_params *cprm)
34341 {
34342 struct vm_area_struct *vma;
34343 size_t size = 0;
34344
34345 for (vma = first_vma(current, gate_vma); vma != NULL;
34346 vma = next_vma(vma, gate_vma))
34347 - size += vma_dump_size(vma, mm_flags);
34348 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34349 return size;
34350 }
34351
34352 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34353
34354 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34355
34356 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34357 + offset += elf_core_vma_data_size(gate_vma, cprm);
34358 offset += elf_core_extra_data_size();
34359 e_shoff = offset;
34360
34361 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34362 offset = dataoff;
34363
34364 size += sizeof(*elf);
34365 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34366 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34367 goto end_coredump;
34368
34369 size += sizeof(*phdr4note);
34370 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34371 if (size > cprm->limit
34372 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34373 goto end_coredump;
34374 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34375 phdr.p_offset = offset;
34376 phdr.p_vaddr = vma->vm_start;
34377 phdr.p_paddr = 0;
34378 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34379 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34380 phdr.p_memsz = vma->vm_end - vma->vm_start;
34381 offset += phdr.p_filesz;
34382 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34383 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34384 phdr.p_align = ELF_EXEC_PAGESIZE;
34385
34386 size += sizeof(phdr);
34387 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34388 if (size > cprm->limit
34389 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34390 goto end_coredump;
34391 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34392 unsigned long addr;
34393 unsigned long end;
34394
34395 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34396 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34397
34398 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34399 struct page *page;
34400 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34401 page = get_dump_page(addr);
34402 if (page) {
34403 void *kaddr = kmap(page);
34404 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34405 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34406 !dump_write(cprm->file, kaddr,
34407 PAGE_SIZE);
34408 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34409
34410 if (e_phnum == PN_XNUM) {
34411 size += sizeof(*shdr4extnum);
34412 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34413 if (size > cprm->limit
34414 || !dump_write(cprm->file, shdr4extnum,
34415 sizeof(*shdr4extnum)))
34416 @@ -2067,6 +2380,97 @@ out:
34417
34418 #endif /* CONFIG_ELF_CORE */
34419
34420 +#ifdef CONFIG_PAX_MPROTECT
34421 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34422 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34423 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34424 + *
34425 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34426 + * basis because we want to allow the common case and not the special ones.
34427 + */
34428 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34429 +{
34430 + struct elfhdr elf_h;
34431 + struct elf_phdr elf_p;
34432 + unsigned long i;
34433 + unsigned long oldflags;
34434 + bool is_textrel_rw, is_textrel_rx, is_relro;
34435 +
34436 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34437 + return;
34438 +
34439 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34440 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34441 +
34442 +#ifdef CONFIG_PAX_ELFRELOCS
34443 + /* possible TEXTREL */
34444 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34445 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34446 +#else
34447 + is_textrel_rw = false;
34448 + is_textrel_rx = false;
34449 +#endif
34450 +
34451 + /* possible RELRO */
34452 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34453 +
34454 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34455 + return;
34456 +
34457 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34458 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34459 +
34460 +#ifdef CONFIG_PAX_ETEXECRELOCS
34461 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34462 +#else
34463 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34464 +#endif
34465 +
34466 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34467 + !elf_check_arch(&elf_h) ||
34468 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34469 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34470 + return;
34471 +
34472 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34473 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34474 + return;
34475 + switch (elf_p.p_type) {
34476 + case PT_DYNAMIC:
34477 + if (!is_textrel_rw && !is_textrel_rx)
34478 + continue;
34479 + i = 0UL;
34480 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34481 + elf_dyn dyn;
34482 +
34483 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34484 + return;
34485 + if (dyn.d_tag == DT_NULL)
34486 + return;
34487 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34488 + gr_log_textrel(vma);
34489 + if (is_textrel_rw)
34490 + vma->vm_flags |= VM_MAYWRITE;
34491 + else
34492 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34493 + vma->vm_flags &= ~VM_MAYWRITE;
34494 + return;
34495 + }
34496 + i++;
34497 + }
34498 + return;
34499 +
34500 + case PT_GNU_RELRO:
34501 + if (!is_relro)
34502 + continue;
34503 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34504 + vma->vm_flags &= ~VM_MAYWRITE;
34505 + return;
34506 + }
34507 + }
34508 +}
34509 +#endif
34510 +
34511 static int __init init_elf_binfmt(void)
34512 {
34513 return register_binfmt(&elf_format);
34514 diff -urNp linux-3.0.3/fs/binfmt_flat.c linux-3.0.3/fs/binfmt_flat.c
34515 --- linux-3.0.3/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34516 +++ linux-3.0.3/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34517 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34518 realdatastart = (unsigned long) -ENOMEM;
34519 printk("Unable to allocate RAM for process data, errno %d\n",
34520 (int)-realdatastart);
34521 + down_write(&current->mm->mmap_sem);
34522 do_munmap(current->mm, textpos, text_len);
34523 + up_write(&current->mm->mmap_sem);
34524 ret = realdatastart;
34525 goto err;
34526 }
34527 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34528 }
34529 if (IS_ERR_VALUE(result)) {
34530 printk("Unable to read data+bss, errno %d\n", (int)-result);
34531 + down_write(&current->mm->mmap_sem);
34532 do_munmap(current->mm, textpos, text_len);
34533 do_munmap(current->mm, realdatastart, len);
34534 + up_write(&current->mm->mmap_sem);
34535 ret = result;
34536 goto err;
34537 }
34538 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34539 }
34540 if (IS_ERR_VALUE(result)) {
34541 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34542 + down_write(&current->mm->mmap_sem);
34543 do_munmap(current->mm, textpos, text_len + data_len + extra +
34544 MAX_SHARED_LIBS * sizeof(unsigned long));
34545 + up_write(&current->mm->mmap_sem);
34546 ret = result;
34547 goto err;
34548 }
34549 diff -urNp linux-3.0.3/fs/bio.c linux-3.0.3/fs/bio.c
34550 --- linux-3.0.3/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34551 +++ linux-3.0.3/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34552 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34553 const int read = bio_data_dir(bio) == READ;
34554 struct bio_map_data *bmd = bio->bi_private;
34555 int i;
34556 - char *p = bmd->sgvecs[0].iov_base;
34557 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34558
34559 __bio_for_each_segment(bvec, bio, i, 0) {
34560 char *addr = page_address(bvec->bv_page);
34561 diff -urNp linux-3.0.3/fs/block_dev.c linux-3.0.3/fs/block_dev.c
34562 --- linux-3.0.3/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34563 +++ linux-3.0.3/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34564 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34565 else if (bdev->bd_contains == bdev)
34566 return true; /* is a whole device which isn't held */
34567
34568 - else if (whole->bd_holder == bd_may_claim)
34569 + else if (whole->bd_holder == (void *)bd_may_claim)
34570 return true; /* is a partition of a device that is being partitioned */
34571 else if (whole->bd_holder != NULL)
34572 return false; /* is a partition of a held device */
34573 diff -urNp linux-3.0.3/fs/btrfs/ctree.c linux-3.0.3/fs/btrfs/ctree.c
34574 --- linux-3.0.3/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34575 +++ linux-3.0.3/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34576 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34577 free_extent_buffer(buf);
34578 add_root_to_dirty_list(root);
34579 } else {
34580 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34581 - parent_start = parent->start;
34582 - else
34583 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34584 + if (parent)
34585 + parent_start = parent->start;
34586 + else
34587 + parent_start = 0;
34588 + } else
34589 parent_start = 0;
34590
34591 WARN_ON(trans->transid != btrfs_header_generation(parent));
34592 diff -urNp linux-3.0.3/fs/btrfs/inode.c linux-3.0.3/fs/btrfs/inode.c
34593 --- linux-3.0.3/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34594 +++ linux-3.0.3/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34595 @@ -6895,7 +6895,7 @@ fail:
34596 return -ENOMEM;
34597 }
34598
34599 -static int btrfs_getattr(struct vfsmount *mnt,
34600 +int btrfs_getattr(struct vfsmount *mnt,
34601 struct dentry *dentry, struct kstat *stat)
34602 {
34603 struct inode *inode = dentry->d_inode;
34604 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34605 return 0;
34606 }
34607
34608 +EXPORT_SYMBOL(btrfs_getattr);
34609 +
34610 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34611 +{
34612 + return BTRFS_I(inode)->root->anon_super.s_dev;
34613 +}
34614 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34615 +
34616 /*
34617 * If a file is moved, it will inherit the cow and compression flags of the new
34618 * directory.
34619 diff -urNp linux-3.0.3/fs/btrfs/ioctl.c linux-3.0.3/fs/btrfs/ioctl.c
34620 --- linux-3.0.3/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34621 +++ linux-3.0.3/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34622 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34623 for (i = 0; i < num_types; i++) {
34624 struct btrfs_space_info *tmp;
34625
34626 + /* Don't copy in more than we allocated */
34627 if (!slot_count)
34628 break;
34629
34630 + slot_count--;
34631 +
34632 info = NULL;
34633 rcu_read_lock();
34634 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34635 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34636 memcpy(dest, &space, sizeof(space));
34637 dest++;
34638 space_args.total_spaces++;
34639 - slot_count--;
34640 }
34641 - if (!slot_count)
34642 - break;
34643 }
34644 up_read(&info->groups_sem);
34645 }
34646 diff -urNp linux-3.0.3/fs/btrfs/relocation.c linux-3.0.3/fs/btrfs/relocation.c
34647 --- linux-3.0.3/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34648 +++ linux-3.0.3/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34649 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34650 }
34651 spin_unlock(&rc->reloc_root_tree.lock);
34652
34653 - BUG_ON((struct btrfs_root *)node->data != root);
34654 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34655
34656 if (!del) {
34657 spin_lock(&rc->reloc_root_tree.lock);
34658 diff -urNp linux-3.0.3/fs/cachefiles/bind.c linux-3.0.3/fs/cachefiles/bind.c
34659 --- linux-3.0.3/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34660 +++ linux-3.0.3/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34661 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34662 args);
34663
34664 /* start by checking things over */
34665 - ASSERT(cache->fstop_percent >= 0 &&
34666 - cache->fstop_percent < cache->fcull_percent &&
34667 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34668 cache->fcull_percent < cache->frun_percent &&
34669 cache->frun_percent < 100);
34670
34671 - ASSERT(cache->bstop_percent >= 0 &&
34672 - cache->bstop_percent < cache->bcull_percent &&
34673 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34674 cache->bcull_percent < cache->brun_percent &&
34675 cache->brun_percent < 100);
34676
34677 diff -urNp linux-3.0.3/fs/cachefiles/daemon.c linux-3.0.3/fs/cachefiles/daemon.c
34678 --- linux-3.0.3/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34679 +++ linux-3.0.3/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34680 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34681 if (n > buflen)
34682 return -EMSGSIZE;
34683
34684 - if (copy_to_user(_buffer, buffer, n) != 0)
34685 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34686 return -EFAULT;
34687
34688 return n;
34689 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34690 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34691 return -EIO;
34692
34693 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
34694 + if (datalen > PAGE_SIZE - 1)
34695 return -EOPNOTSUPP;
34696
34697 /* drag the command string into the kernel so we can parse it */
34698 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34699 if (args[0] != '%' || args[1] != '\0')
34700 return -EINVAL;
34701
34702 - if (fstop < 0 || fstop >= cache->fcull_percent)
34703 + if (fstop >= cache->fcull_percent)
34704 return cachefiles_daemon_range_error(cache, args);
34705
34706 cache->fstop_percent = fstop;
34707 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34708 if (args[0] != '%' || args[1] != '\0')
34709 return -EINVAL;
34710
34711 - if (bstop < 0 || bstop >= cache->bcull_percent)
34712 + if (bstop >= cache->bcull_percent)
34713 return cachefiles_daemon_range_error(cache, args);
34714
34715 cache->bstop_percent = bstop;
34716 diff -urNp linux-3.0.3/fs/cachefiles/internal.h linux-3.0.3/fs/cachefiles/internal.h
34717 --- linux-3.0.3/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34718 +++ linux-3.0.3/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34719 @@ -57,7 +57,7 @@ struct cachefiles_cache {
34720 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34721 struct rb_root active_nodes; /* active nodes (can't be culled) */
34722 rwlock_t active_lock; /* lock for active_nodes */
34723 - atomic_t gravecounter; /* graveyard uniquifier */
34724 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34725 unsigned frun_percent; /* when to stop culling (% files) */
34726 unsigned fcull_percent; /* when to start culling (% files) */
34727 unsigned fstop_percent; /* when to stop allocating (% files) */
34728 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34729 * proc.c
34730 */
34731 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34732 -extern atomic_t cachefiles_lookup_histogram[HZ];
34733 -extern atomic_t cachefiles_mkdir_histogram[HZ];
34734 -extern atomic_t cachefiles_create_histogram[HZ];
34735 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34736 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34737 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34738
34739 extern int __init cachefiles_proc_init(void);
34740 extern void cachefiles_proc_cleanup(void);
34741 static inline
34742 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34743 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34744 {
34745 unsigned long jif = jiffies - start_jif;
34746 if (jif >= HZ)
34747 jif = HZ - 1;
34748 - atomic_inc(&histogram[jif]);
34749 + atomic_inc_unchecked(&histogram[jif]);
34750 }
34751
34752 #else
34753 diff -urNp linux-3.0.3/fs/cachefiles/namei.c linux-3.0.3/fs/cachefiles/namei.c
34754 --- linux-3.0.3/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34755 +++ linux-3.0.3/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34756 @@ -318,7 +318,7 @@ try_again:
34757 /* first step is to make up a grave dentry in the graveyard */
34758 sprintf(nbuffer, "%08x%08x",
34759 (uint32_t) get_seconds(),
34760 - (uint32_t) atomic_inc_return(&cache->gravecounter));
34761 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34762
34763 /* do the multiway lock magic */
34764 trap = lock_rename(cache->graveyard, dir);
34765 diff -urNp linux-3.0.3/fs/cachefiles/proc.c linux-3.0.3/fs/cachefiles/proc.c
34766 --- linux-3.0.3/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34767 +++ linux-3.0.3/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34768 @@ -14,9 +14,9 @@
34769 #include <linux/seq_file.h>
34770 #include "internal.h"
34771
34772 -atomic_t cachefiles_lookup_histogram[HZ];
34773 -atomic_t cachefiles_mkdir_histogram[HZ];
34774 -atomic_t cachefiles_create_histogram[HZ];
34775 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34776 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34777 +atomic_unchecked_t cachefiles_create_histogram[HZ];
34778
34779 /*
34780 * display the latency histogram
34781 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34782 return 0;
34783 default:
34784 index = (unsigned long) v - 3;
34785 - x = atomic_read(&cachefiles_lookup_histogram[index]);
34786 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
34787 - z = atomic_read(&cachefiles_create_histogram[index]);
34788 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34789 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34790 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34791 if (x == 0 && y == 0 && z == 0)
34792 return 0;
34793
34794 diff -urNp linux-3.0.3/fs/cachefiles/rdwr.c linux-3.0.3/fs/cachefiles/rdwr.c
34795 --- linux-3.0.3/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
34796 +++ linux-3.0.3/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
34797 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
34798 old_fs = get_fs();
34799 set_fs(KERNEL_DS);
34800 ret = file->f_op->write(
34801 - file, (const void __user *) data, len, &pos);
34802 + file, (__force const void __user *) data, len, &pos);
34803 set_fs(old_fs);
34804 kunmap(page);
34805 if (ret != len)
34806 diff -urNp linux-3.0.3/fs/ceph/dir.c linux-3.0.3/fs/ceph/dir.c
34807 --- linux-3.0.3/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
34808 +++ linux-3.0.3/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
34809 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
34810 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
34811 struct ceph_mds_client *mdsc = fsc->mdsc;
34812 unsigned frag = fpos_frag(filp->f_pos);
34813 - int off = fpos_off(filp->f_pos);
34814 + unsigned int off = fpos_off(filp->f_pos);
34815 int err;
34816 u32 ftype;
34817 struct ceph_mds_reply_info_parsed *rinfo;
34818 diff -urNp linux-3.0.3/fs/cifs/cifs_debug.c linux-3.0.3/fs/cifs/cifs_debug.c
34819 --- linux-3.0.3/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
34820 +++ linux-3.0.3/fs/cifs/cifs_debug.c 2011-08-23 21:47:56.000000000 -0400
34821 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
34822 tcon = list_entry(tmp3,
34823 struct cifs_tcon,
34824 tcon_list);
34825 - atomic_set(&tcon->num_smbs_sent, 0);
34826 - atomic_set(&tcon->num_writes, 0);
34827 - atomic_set(&tcon->num_reads, 0);
34828 - atomic_set(&tcon->num_oplock_brks, 0);
34829 - atomic_set(&tcon->num_opens, 0);
34830 - atomic_set(&tcon->num_posixopens, 0);
34831 - atomic_set(&tcon->num_posixmkdirs, 0);
34832 - atomic_set(&tcon->num_closes, 0);
34833 - atomic_set(&tcon->num_deletes, 0);
34834 - atomic_set(&tcon->num_mkdirs, 0);
34835 - atomic_set(&tcon->num_rmdirs, 0);
34836 - atomic_set(&tcon->num_renames, 0);
34837 - atomic_set(&tcon->num_t2renames, 0);
34838 - atomic_set(&tcon->num_ffirst, 0);
34839 - atomic_set(&tcon->num_fnext, 0);
34840 - atomic_set(&tcon->num_fclose, 0);
34841 - atomic_set(&tcon->num_hardlinks, 0);
34842 - atomic_set(&tcon->num_symlinks, 0);
34843 - atomic_set(&tcon->num_locks, 0);
34844 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
34845 + atomic_set_unchecked(&tcon->num_writes, 0);
34846 + atomic_set_unchecked(&tcon->num_reads, 0);
34847 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
34848 + atomic_set_unchecked(&tcon->num_opens, 0);
34849 + atomic_set_unchecked(&tcon->num_posixopens, 0);
34850 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
34851 + atomic_set_unchecked(&tcon->num_closes, 0);
34852 + atomic_set_unchecked(&tcon->num_deletes, 0);
34853 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
34854 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
34855 + atomic_set_unchecked(&tcon->num_renames, 0);
34856 + atomic_set_unchecked(&tcon->num_t2renames, 0);
34857 + atomic_set_unchecked(&tcon->num_ffirst, 0);
34858 + atomic_set_unchecked(&tcon->num_fnext, 0);
34859 + atomic_set_unchecked(&tcon->num_fclose, 0);
34860 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
34861 + atomic_set_unchecked(&tcon->num_symlinks, 0);
34862 + atomic_set_unchecked(&tcon->num_locks, 0);
34863 }
34864 }
34865 }
34866 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
34867 if (tcon->need_reconnect)
34868 seq_puts(m, "\tDISCONNECTED ");
34869 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
34870 - atomic_read(&tcon->num_smbs_sent),
34871 - atomic_read(&tcon->num_oplock_brks));
34872 + atomic_read_unchecked(&tcon->num_smbs_sent),
34873 + atomic_read_unchecked(&tcon->num_oplock_brks));
34874 seq_printf(m, "\nReads: %d Bytes: %lld",
34875 - atomic_read(&tcon->num_reads),
34876 + atomic_read_unchecked(&tcon->num_reads),
34877 (long long)(tcon->bytes_read));
34878 seq_printf(m, "\nWrites: %d Bytes: %lld",
34879 - atomic_read(&tcon->num_writes),
34880 + atomic_read_unchecked(&tcon->num_writes),
34881 (long long)(tcon->bytes_written));
34882 seq_printf(m, "\nFlushes: %d",
34883 - atomic_read(&tcon->num_flushes));
34884 + atomic_read_unchecked(&tcon->num_flushes));
34885 seq_printf(m, "\nLocks: %d HardLinks: %d "
34886 "Symlinks: %d",
34887 - atomic_read(&tcon->num_locks),
34888 - atomic_read(&tcon->num_hardlinks),
34889 - atomic_read(&tcon->num_symlinks));
34890 + atomic_read_unchecked(&tcon->num_locks),
34891 + atomic_read_unchecked(&tcon->num_hardlinks),
34892 + atomic_read_unchecked(&tcon->num_symlinks));
34893 seq_printf(m, "\nOpens: %d Closes: %d "
34894 "Deletes: %d",
34895 - atomic_read(&tcon->num_opens),
34896 - atomic_read(&tcon->num_closes),
34897 - atomic_read(&tcon->num_deletes));
34898 + atomic_read_unchecked(&tcon->num_opens),
34899 + atomic_read_unchecked(&tcon->num_closes),
34900 + atomic_read_unchecked(&tcon->num_deletes));
34901 seq_printf(m, "\nPosix Opens: %d "
34902 "Posix Mkdirs: %d",
34903 - atomic_read(&tcon->num_posixopens),
34904 - atomic_read(&tcon->num_posixmkdirs));
34905 + atomic_read_unchecked(&tcon->num_posixopens),
34906 + atomic_read_unchecked(&tcon->num_posixmkdirs));
34907 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
34908 - atomic_read(&tcon->num_mkdirs),
34909 - atomic_read(&tcon->num_rmdirs));
34910 + atomic_read_unchecked(&tcon->num_mkdirs),
34911 + atomic_read_unchecked(&tcon->num_rmdirs));
34912 seq_printf(m, "\nRenames: %d T2 Renames %d",
34913 - atomic_read(&tcon->num_renames),
34914 - atomic_read(&tcon->num_t2renames));
34915 + atomic_read_unchecked(&tcon->num_renames),
34916 + atomic_read_unchecked(&tcon->num_t2renames));
34917 seq_printf(m, "\nFindFirst: %d FNext %d "
34918 "FClose %d",
34919 - atomic_read(&tcon->num_ffirst),
34920 - atomic_read(&tcon->num_fnext),
34921 - atomic_read(&tcon->num_fclose));
34922 + atomic_read_unchecked(&tcon->num_ffirst),
34923 + atomic_read_unchecked(&tcon->num_fnext),
34924 + atomic_read_unchecked(&tcon->num_fclose));
34925 }
34926 }
34927 }
34928 diff -urNp linux-3.0.3/fs/cifs/cifsglob.h linux-3.0.3/fs/cifs/cifsglob.h
34929 --- linux-3.0.3/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
34930 +++ linux-3.0.3/fs/cifs/cifsglob.h 2011-08-23 21:47:56.000000000 -0400
34931 @@ -381,28 +381,28 @@ struct cifs_tcon {
34932 __u16 Flags; /* optional support bits */
34933 enum statusEnum tidStatus;
34934 #ifdef CONFIG_CIFS_STATS
34935 - atomic_t num_smbs_sent;
34936 - atomic_t num_writes;
34937 - atomic_t num_reads;
34938 - atomic_t num_flushes;
34939 - atomic_t num_oplock_brks;
34940 - atomic_t num_opens;
34941 - atomic_t num_closes;
34942 - atomic_t num_deletes;
34943 - atomic_t num_mkdirs;
34944 - atomic_t num_posixopens;
34945 - atomic_t num_posixmkdirs;
34946 - atomic_t num_rmdirs;
34947 - atomic_t num_renames;
34948 - atomic_t num_t2renames;
34949 - atomic_t num_ffirst;
34950 - atomic_t num_fnext;
34951 - atomic_t num_fclose;
34952 - atomic_t num_hardlinks;
34953 - atomic_t num_symlinks;
34954 - atomic_t num_locks;
34955 - atomic_t num_acl_get;
34956 - atomic_t num_acl_set;
34957 + atomic_unchecked_t num_smbs_sent;
34958 + atomic_unchecked_t num_writes;
34959 + atomic_unchecked_t num_reads;
34960 + atomic_unchecked_t num_flushes;
34961 + atomic_unchecked_t num_oplock_brks;
34962 + atomic_unchecked_t num_opens;
34963 + atomic_unchecked_t num_closes;
34964 + atomic_unchecked_t num_deletes;
34965 + atomic_unchecked_t num_mkdirs;
34966 + atomic_unchecked_t num_posixopens;
34967 + atomic_unchecked_t num_posixmkdirs;
34968 + atomic_unchecked_t num_rmdirs;
34969 + atomic_unchecked_t num_renames;
34970 + atomic_unchecked_t num_t2renames;
34971 + atomic_unchecked_t num_ffirst;
34972 + atomic_unchecked_t num_fnext;
34973 + atomic_unchecked_t num_fclose;
34974 + atomic_unchecked_t num_hardlinks;
34975 + atomic_unchecked_t num_symlinks;
34976 + atomic_unchecked_t num_locks;
34977 + atomic_unchecked_t num_acl_get;
34978 + atomic_unchecked_t num_acl_set;
34979 #ifdef CONFIG_CIFS_STATS2
34980 unsigned long long time_writes;
34981 unsigned long long time_reads;
34982 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
34983 }
34984
34985 #ifdef CONFIG_CIFS_STATS
34986 -#define cifs_stats_inc atomic_inc
34987 +#define cifs_stats_inc atomic_inc_unchecked
34988
34989 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
34990 unsigned int bytes)
34991 diff -urNp linux-3.0.3/fs/cifs/link.c linux-3.0.3/fs/cifs/link.c
34992 --- linux-3.0.3/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
34993 +++ linux-3.0.3/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
34994 @@ -587,7 +587,7 @@ symlink_exit:
34995
34996 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
34997 {
34998 - char *p = nd_get_link(nd);
34999 + const char *p = nd_get_link(nd);
35000 if (!IS_ERR(p))
35001 kfree(p);
35002 }
35003 diff -urNp linux-3.0.3/fs/coda/cache.c linux-3.0.3/fs/coda/cache.c
35004 --- linux-3.0.3/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35005 +++ linux-3.0.3/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35006 @@ -24,7 +24,7 @@
35007 #include "coda_linux.h"
35008 #include "coda_cache.h"
35009
35010 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35011 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35012
35013 /* replace or extend an acl cache hit */
35014 void coda_cache_enter(struct inode *inode, int mask)
35015 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35016 struct coda_inode_info *cii = ITOC(inode);
35017
35018 spin_lock(&cii->c_lock);
35019 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35020 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35021 if (cii->c_uid != current_fsuid()) {
35022 cii->c_uid = current_fsuid();
35023 cii->c_cached_perm = mask;
35024 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35025 {
35026 struct coda_inode_info *cii = ITOC(inode);
35027 spin_lock(&cii->c_lock);
35028 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35029 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35030 spin_unlock(&cii->c_lock);
35031 }
35032
35033 /* remove all acl caches */
35034 void coda_cache_clear_all(struct super_block *sb)
35035 {
35036 - atomic_inc(&permission_epoch);
35037 + atomic_inc_unchecked(&permission_epoch);
35038 }
35039
35040
35041 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35042 spin_lock(&cii->c_lock);
35043 hit = (mask & cii->c_cached_perm) == mask &&
35044 cii->c_uid == current_fsuid() &&
35045 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35046 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35047 spin_unlock(&cii->c_lock);
35048
35049 return hit;
35050 diff -urNp linux-3.0.3/fs/compat_binfmt_elf.c linux-3.0.3/fs/compat_binfmt_elf.c
35051 --- linux-3.0.3/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35052 +++ linux-3.0.3/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35053 @@ -30,11 +30,13 @@
35054 #undef elf_phdr
35055 #undef elf_shdr
35056 #undef elf_note
35057 +#undef elf_dyn
35058 #undef elf_addr_t
35059 #define elfhdr elf32_hdr
35060 #define elf_phdr elf32_phdr
35061 #define elf_shdr elf32_shdr
35062 #define elf_note elf32_note
35063 +#define elf_dyn Elf32_Dyn
35064 #define elf_addr_t Elf32_Addr
35065
35066 /*
35067 diff -urNp linux-3.0.3/fs/compat.c linux-3.0.3/fs/compat.c
35068 --- linux-3.0.3/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35069 +++ linux-3.0.3/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35070 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35071 goto out;
35072
35073 ret = -EINVAL;
35074 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35075 + if (nr_segs > UIO_MAXIOV)
35076 goto out;
35077 if (nr_segs > fast_segs) {
35078 ret = -ENOMEM;
35079 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35080
35081 struct compat_readdir_callback {
35082 struct compat_old_linux_dirent __user *dirent;
35083 + struct file * file;
35084 int result;
35085 };
35086
35087 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35088 buf->result = -EOVERFLOW;
35089 return -EOVERFLOW;
35090 }
35091 +
35092 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35093 + return 0;
35094 +
35095 buf->result++;
35096 dirent = buf->dirent;
35097 if (!access_ok(VERIFY_WRITE, dirent,
35098 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35099
35100 buf.result = 0;
35101 buf.dirent = dirent;
35102 + buf.file = file;
35103
35104 error = vfs_readdir(file, compat_fillonedir, &buf);
35105 if (buf.result)
35106 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35107 struct compat_getdents_callback {
35108 struct compat_linux_dirent __user *current_dir;
35109 struct compat_linux_dirent __user *previous;
35110 + struct file * file;
35111 int count;
35112 int error;
35113 };
35114 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35115 buf->error = -EOVERFLOW;
35116 return -EOVERFLOW;
35117 }
35118 +
35119 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35120 + return 0;
35121 +
35122 dirent = buf->previous;
35123 if (dirent) {
35124 if (__put_user(offset, &dirent->d_off))
35125 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35126 buf.previous = NULL;
35127 buf.count = count;
35128 buf.error = 0;
35129 + buf.file = file;
35130
35131 error = vfs_readdir(file, compat_filldir, &buf);
35132 if (error >= 0)
35133 @@ -1006,6 +1018,7 @@ out:
35134 struct compat_getdents_callback64 {
35135 struct linux_dirent64 __user *current_dir;
35136 struct linux_dirent64 __user *previous;
35137 + struct file * file;
35138 int count;
35139 int error;
35140 };
35141 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35142 buf->error = -EINVAL; /* only used if we fail.. */
35143 if (reclen > buf->count)
35144 return -EINVAL;
35145 +
35146 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35147 + return 0;
35148 +
35149 dirent = buf->previous;
35150
35151 if (dirent) {
35152 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35153 buf.previous = NULL;
35154 buf.count = count;
35155 buf.error = 0;
35156 + buf.file = file;
35157
35158 error = vfs_readdir(file, compat_filldir64, &buf);
35159 if (error >= 0)
35160 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35161 struct fdtable *fdt;
35162 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35163
35164 + pax_track_stack();
35165 +
35166 if (n < 0)
35167 goto out_nofds;
35168
35169 diff -urNp linux-3.0.3/fs/compat_ioctl.c linux-3.0.3/fs/compat_ioctl.c
35170 --- linux-3.0.3/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35171 +++ linux-3.0.3/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35172 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35173
35174 err = get_user(palp, &up->palette);
35175 err |= get_user(length, &up->length);
35176 + if (err)
35177 + return -EFAULT;
35178
35179 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35180 err = put_user(compat_ptr(palp), &up_native->palette);
35181 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35182 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35183 {
35184 unsigned int a, b;
35185 - a = *(unsigned int *)p;
35186 - b = *(unsigned int *)q;
35187 + a = *(const unsigned int *)p;
35188 + b = *(const unsigned int *)q;
35189 if (a > b)
35190 return 1;
35191 if (a < b)
35192 diff -urNp linux-3.0.3/fs/configfs/dir.c linux-3.0.3/fs/configfs/dir.c
35193 --- linux-3.0.3/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35194 +++ linux-3.0.3/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35195 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35196 }
35197 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35198 struct configfs_dirent *next;
35199 - const char * name;
35200 + const unsigned char * name;
35201 + char d_name[sizeof(next->s_dentry->d_iname)];
35202 int len;
35203 struct inode *inode = NULL;
35204
35205 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35206 continue;
35207
35208 name = configfs_get_name(next);
35209 - len = strlen(name);
35210 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35211 + len = next->s_dentry->d_name.len;
35212 + memcpy(d_name, name, len);
35213 + name = d_name;
35214 + } else
35215 + len = strlen(name);
35216
35217 /*
35218 * We'll have a dentry and an inode for
35219 diff -urNp linux-3.0.3/fs/dcache.c linux-3.0.3/fs/dcache.c
35220 --- linux-3.0.3/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35221 +++ linux-3.0.3/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35222 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35223 mempages -= reserve;
35224
35225 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35226 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35227 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35228
35229 dcache_init();
35230 inode_init();
35231 diff -urNp linux-3.0.3/fs/ecryptfs/inode.c linux-3.0.3/fs/ecryptfs/inode.c
35232 --- linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35233 +++ linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35234 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35235 old_fs = get_fs();
35236 set_fs(get_ds());
35237 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35238 - (char __user *)lower_buf,
35239 + (__force char __user *)lower_buf,
35240 lower_bufsiz);
35241 set_fs(old_fs);
35242 if (rc < 0)
35243 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35244 }
35245 old_fs = get_fs();
35246 set_fs(get_ds());
35247 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35248 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35249 set_fs(old_fs);
35250 if (rc < 0) {
35251 kfree(buf);
35252 @@ -765,7 +765,7 @@ out:
35253 static void
35254 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35255 {
35256 - char *buf = nd_get_link(nd);
35257 + const char *buf = nd_get_link(nd);
35258 if (!IS_ERR(buf)) {
35259 /* Free the char* */
35260 kfree(buf);
35261 diff -urNp linux-3.0.3/fs/ecryptfs/miscdev.c linux-3.0.3/fs/ecryptfs/miscdev.c
35262 --- linux-3.0.3/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35263 +++ linux-3.0.3/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35264 @@ -328,7 +328,7 @@ check_list:
35265 goto out_unlock_msg_ctx;
35266 i = 5;
35267 if (msg_ctx->msg) {
35268 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35269 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35270 goto out_unlock_msg_ctx;
35271 i += packet_length_size;
35272 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35273 diff -urNp linux-3.0.3/fs/exec.c linux-3.0.3/fs/exec.c
35274 --- linux-3.0.3/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35275 +++ linux-3.0.3/fs/exec.c 2011-08-23 21:48:14.000000000 -0400
35276 @@ -55,12 +55,24 @@
35277 #include <linux/pipe_fs_i.h>
35278 #include <linux/oom.h>
35279 #include <linux/compat.h>
35280 +#include <linux/random.h>
35281 +#include <linux/seq_file.h>
35282 +
35283 +#ifdef CONFIG_PAX_REFCOUNT
35284 +#include <linux/kallsyms.h>
35285 +#include <linux/kdebug.h>
35286 +#endif
35287
35288 #include <asm/uaccess.h>
35289 #include <asm/mmu_context.h>
35290 #include <asm/tlb.h>
35291 #include "internal.h"
35292
35293 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35294 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35295 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35296 +#endif
35297 +
35298 int core_uses_pid;
35299 char core_pattern[CORENAME_MAX_SIZE] = "core";
35300 unsigned int core_pipe_limit;
35301 @@ -70,7 +82,7 @@ struct core_name {
35302 char *corename;
35303 int used, size;
35304 };
35305 -static atomic_t call_count = ATOMIC_INIT(1);
35306 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35307
35308 /* The maximal length of core_pattern is also specified in sysctl.c */
35309
35310 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35311 char *tmp = getname(library);
35312 int error = PTR_ERR(tmp);
35313 static const struct open_flags uselib_flags = {
35314 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35315 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35316 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35317 .intent = LOOKUP_OPEN
35318 };
35319 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35320 int write)
35321 {
35322 struct page *page;
35323 - int ret;
35324
35325 -#ifdef CONFIG_STACK_GROWSUP
35326 - if (write) {
35327 - ret = expand_downwards(bprm->vma, pos);
35328 - if (ret < 0)
35329 - return NULL;
35330 - }
35331 -#endif
35332 - ret = get_user_pages(current, bprm->mm, pos,
35333 - 1, write, 1, &page, NULL);
35334 - if (ret <= 0)
35335 + if (0 > expand_downwards(bprm->vma, pos))
35336 + return NULL;
35337 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35338 return NULL;
35339
35340 if (write) {
35341 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35342 vma->vm_end = STACK_TOP_MAX;
35343 vma->vm_start = vma->vm_end - PAGE_SIZE;
35344 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35345 +
35346 +#ifdef CONFIG_PAX_SEGMEXEC
35347 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35348 +#endif
35349 +
35350 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35351 INIT_LIST_HEAD(&vma->anon_vma_chain);
35352
35353 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35354 mm->stack_vm = mm->total_vm = 1;
35355 up_write(&mm->mmap_sem);
35356 bprm->p = vma->vm_end - sizeof(void *);
35357 +
35358 +#ifdef CONFIG_PAX_RANDUSTACK
35359 + if (randomize_va_space)
35360 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35361 +#endif
35362 +
35363 return 0;
35364 err:
35365 up_write(&mm->mmap_sem);
35366 @@ -403,19 +418,7 @@ err:
35367 return err;
35368 }
35369
35370 -struct user_arg_ptr {
35371 -#ifdef CONFIG_COMPAT
35372 - bool is_compat;
35373 -#endif
35374 - union {
35375 - const char __user *const __user *native;
35376 -#ifdef CONFIG_COMPAT
35377 - compat_uptr_t __user *compat;
35378 -#endif
35379 - } ptr;
35380 -};
35381 -
35382 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35383 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35384 {
35385 const char __user *native;
35386
35387 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35388 int r;
35389 mm_segment_t oldfs = get_fs();
35390 struct user_arg_ptr argv = {
35391 - .ptr.native = (const char __user *const __user *)__argv,
35392 + .ptr.native = (__force const char __user *const __user *)__argv,
35393 };
35394
35395 set_fs(KERNEL_DS);
35396 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35397 unsigned long new_end = old_end - shift;
35398 struct mmu_gather tlb;
35399
35400 - BUG_ON(new_start > new_end);
35401 + if (new_start >= new_end || new_start < mmap_min_addr)
35402 + return -ENOMEM;
35403
35404 /*
35405 * ensure there are no vmas between where we want to go
35406 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35407 if (vma != find_vma(mm, new_start))
35408 return -EFAULT;
35409
35410 +#ifdef CONFIG_PAX_SEGMEXEC
35411 + BUG_ON(pax_find_mirror_vma(vma));
35412 +#endif
35413 +
35414 /*
35415 * cover the whole range: [new_start, old_end)
35416 */
35417 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35418 stack_top = arch_align_stack(stack_top);
35419 stack_top = PAGE_ALIGN(stack_top);
35420
35421 - if (unlikely(stack_top < mmap_min_addr) ||
35422 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35423 - return -ENOMEM;
35424 -
35425 stack_shift = vma->vm_end - stack_top;
35426
35427 bprm->p -= stack_shift;
35428 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35429 bprm->exec -= stack_shift;
35430
35431 down_write(&mm->mmap_sem);
35432 +
35433 + /* Move stack pages down in memory. */
35434 + if (stack_shift) {
35435 + ret = shift_arg_pages(vma, stack_shift);
35436 + if (ret)
35437 + goto out_unlock;
35438 + }
35439 +
35440 vm_flags = VM_STACK_FLAGS;
35441
35442 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35443 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35444 + vm_flags &= ~VM_EXEC;
35445 +
35446 +#ifdef CONFIG_PAX_MPROTECT
35447 + if (mm->pax_flags & MF_PAX_MPROTECT)
35448 + vm_flags &= ~VM_MAYEXEC;
35449 +#endif
35450 +
35451 + }
35452 +#endif
35453 +
35454 /*
35455 * Adjust stack execute permissions; explicitly enable for
35456 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35457 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35458 goto out_unlock;
35459 BUG_ON(prev != vma);
35460
35461 - /* Move stack pages down in memory. */
35462 - if (stack_shift) {
35463 - ret = shift_arg_pages(vma, stack_shift);
35464 - if (ret)
35465 - goto out_unlock;
35466 - }
35467 -
35468 /* mprotect_fixup is overkill to remove the temporary stack flags */
35469 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35470
35471 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35472 struct file *file;
35473 int err;
35474 static const struct open_flags open_exec_flags = {
35475 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35476 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35477 .acc_mode = MAY_EXEC | MAY_OPEN,
35478 .intent = LOOKUP_OPEN
35479 };
35480 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35481 old_fs = get_fs();
35482 set_fs(get_ds());
35483 /* The cast to a user pointer is valid due to the set_fs() */
35484 - result = vfs_read(file, (void __user *)addr, count, &pos);
35485 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35486 set_fs(old_fs);
35487 return result;
35488 }
35489 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35490 }
35491 rcu_read_unlock();
35492
35493 - if (p->fs->users > n_fs) {
35494 + if (atomic_read(&p->fs->users) > n_fs) {
35495 bprm->unsafe |= LSM_UNSAFE_SHARE;
35496 } else {
35497 res = -EAGAIN;
35498 @@ -1428,6 +1445,11 @@ static int do_execve_common(const char *
35499 struct user_arg_ptr envp,
35500 struct pt_regs *regs)
35501 {
35502 +#ifdef CONFIG_GRKERNSEC
35503 + struct file *old_exec_file;
35504 + struct acl_subject_label *old_acl;
35505 + struct rlimit old_rlim[RLIM_NLIMITS];
35506 +#endif
35507 struct linux_binprm *bprm;
35508 struct file *file;
35509 struct files_struct *displaced;
35510 @@ -1464,6 +1486,23 @@ static int do_execve_common(const char *
35511 bprm->filename = filename;
35512 bprm->interp = filename;
35513
35514 + if (gr_process_user_ban()) {
35515 + retval = -EPERM;
35516 + goto out_file;
35517 + }
35518 +
35519 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35520 +
35521 + if (gr_handle_nproc()) {
35522 + retval = -EAGAIN;
35523 + goto out_file;
35524 + }
35525 +
35526 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35527 + retval = -EACCES;
35528 + goto out_file;
35529 + }
35530 +
35531 retval = bprm_mm_init(bprm);
35532 if (retval)
35533 goto out_file;
35534 @@ -1493,9 +1532,40 @@ static int do_execve_common(const char *
35535 if (retval < 0)
35536 goto out;
35537
35538 + if (!gr_tpe_allow(file)) {
35539 + retval = -EACCES;
35540 + goto out;
35541 + }
35542 +
35543 + if (gr_check_crash_exec(file)) {
35544 + retval = -EACCES;
35545 + goto out;
35546 + }
35547 +
35548 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35549 +
35550 + gr_handle_exec_args(bprm, argv);
35551 +
35552 +#ifdef CONFIG_GRKERNSEC
35553 + old_acl = current->acl;
35554 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35555 + old_exec_file = current->exec_file;
35556 + get_file(file);
35557 + current->exec_file = file;
35558 +#endif
35559 +
35560 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35561 + bprm->unsafe & LSM_UNSAFE_SHARE);
35562 + if (retval < 0)
35563 + goto out_fail;
35564 +
35565 retval = search_binary_handler(bprm,regs);
35566 if (retval < 0)
35567 - goto out;
35568 + goto out_fail;
35569 +#ifdef CONFIG_GRKERNSEC
35570 + if (old_exec_file)
35571 + fput(old_exec_file);
35572 +#endif
35573
35574 /* execve succeeded */
35575 current->fs->in_exec = 0;
35576 @@ -1506,6 +1576,14 @@ static int do_execve_common(const char *
35577 put_files_struct(displaced);
35578 return retval;
35579
35580 +out_fail:
35581 +#ifdef CONFIG_GRKERNSEC
35582 + current->acl = old_acl;
35583 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35584 + fput(current->exec_file);
35585 + current->exec_file = old_exec_file;
35586 +#endif
35587 +
35588 out:
35589 if (bprm->mm) {
35590 acct_arg_size(bprm, 0);
35591 @@ -1579,7 +1657,7 @@ static int expand_corename(struct core_n
35592 {
35593 char *old_corename = cn->corename;
35594
35595 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35596 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35597 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35598
35599 if (!cn->corename) {
35600 @@ -1667,7 +1745,7 @@ static int format_corename(struct core_n
35601 int pid_in_pattern = 0;
35602 int err = 0;
35603
35604 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35605 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35606 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35607 cn->used = 0;
35608
35609 @@ -1758,6 +1836,219 @@ out:
35610 return ispipe;
35611 }
35612
35613 +int pax_check_flags(unsigned long *flags)
35614 +{
35615 + int retval = 0;
35616 +
35617 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35618 + if (*flags & MF_PAX_SEGMEXEC)
35619 + {
35620 + *flags &= ~MF_PAX_SEGMEXEC;
35621 + retval = -EINVAL;
35622 + }
35623 +#endif
35624 +
35625 + if ((*flags & MF_PAX_PAGEEXEC)
35626 +
35627 +#ifdef CONFIG_PAX_PAGEEXEC
35628 + && (*flags & MF_PAX_SEGMEXEC)
35629 +#endif
35630 +
35631 + )
35632 + {
35633 + *flags &= ~MF_PAX_PAGEEXEC;
35634 + retval = -EINVAL;
35635 + }
35636 +
35637 + if ((*flags & MF_PAX_MPROTECT)
35638 +
35639 +#ifdef CONFIG_PAX_MPROTECT
35640 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35641 +#endif
35642 +
35643 + )
35644 + {
35645 + *flags &= ~MF_PAX_MPROTECT;
35646 + retval = -EINVAL;
35647 + }
35648 +
35649 + if ((*flags & MF_PAX_EMUTRAMP)
35650 +
35651 +#ifdef CONFIG_PAX_EMUTRAMP
35652 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35653 +#endif
35654 +
35655 + )
35656 + {
35657 + *flags &= ~MF_PAX_EMUTRAMP;
35658 + retval = -EINVAL;
35659 + }
35660 +
35661 + return retval;
35662 +}
35663 +
35664 +EXPORT_SYMBOL(pax_check_flags);
35665 +
35666 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35667 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35668 +{
35669 + struct task_struct *tsk = current;
35670 + struct mm_struct *mm = current->mm;
35671 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35672 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35673 + char *path_exec = NULL;
35674 + char *path_fault = NULL;
35675 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
35676 +
35677 + if (buffer_exec && buffer_fault) {
35678 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35679 +
35680 + down_read(&mm->mmap_sem);
35681 + vma = mm->mmap;
35682 + while (vma && (!vma_exec || !vma_fault)) {
35683 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35684 + vma_exec = vma;
35685 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35686 + vma_fault = vma;
35687 + vma = vma->vm_next;
35688 + }
35689 + if (vma_exec) {
35690 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
35691 + if (IS_ERR(path_exec))
35692 + path_exec = "<path too long>";
35693 + else {
35694 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
35695 + if (path_exec) {
35696 + *path_exec = 0;
35697 + path_exec = buffer_exec;
35698 + } else
35699 + path_exec = "<path too long>";
35700 + }
35701 + }
35702 + if (vma_fault) {
35703 + start = vma_fault->vm_start;
35704 + end = vma_fault->vm_end;
35705 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
35706 + if (vma_fault->vm_file) {
35707 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
35708 + if (IS_ERR(path_fault))
35709 + path_fault = "<path too long>";
35710 + else {
35711 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
35712 + if (path_fault) {
35713 + *path_fault = 0;
35714 + path_fault = buffer_fault;
35715 + } else
35716 + path_fault = "<path too long>";
35717 + }
35718 + } else
35719 + path_fault = "<anonymous mapping>";
35720 + }
35721 + up_read(&mm->mmap_sem);
35722 + }
35723 + if (tsk->signal->curr_ip)
35724 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
35725 + else
35726 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
35727 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
35728 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
35729 + task_uid(tsk), task_euid(tsk), pc, sp);
35730 + free_page((unsigned long)buffer_exec);
35731 + free_page((unsigned long)buffer_fault);
35732 + pax_report_insns(pc, sp);
35733 + do_coredump(SIGKILL, SIGKILL, regs);
35734 +}
35735 +#endif
35736 +
35737 +#ifdef CONFIG_PAX_REFCOUNT
35738 +void pax_report_refcount_overflow(struct pt_regs *regs)
35739 +{
35740 + if (current->signal->curr_ip)
35741 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
35742 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
35743 + else
35744 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
35745 + current->comm, task_pid_nr(current), current_uid(), current_euid());
35746 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
35747 + show_regs(regs);
35748 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
35749 +}
35750 +#endif
35751 +
35752 +#ifdef CONFIG_PAX_USERCOPY
35753 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
35754 +int object_is_on_stack(const void *obj, unsigned long len)
35755 +{
35756 + const void * const stack = task_stack_page(current);
35757 + const void * const stackend = stack + THREAD_SIZE;
35758 +
35759 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
35760 + const void *frame = NULL;
35761 + const void *oldframe;
35762 +#endif
35763 +
35764 + if (obj + len < obj)
35765 + return -1;
35766 +
35767 + if (obj + len <= stack || stackend <= obj)
35768 + return 0;
35769 +
35770 + if (obj < stack || stackend < obj + len)
35771 + return -1;
35772 +
35773 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
35774 + oldframe = __builtin_frame_address(1);
35775 + if (oldframe)
35776 + frame = __builtin_frame_address(2);
35777 + /*
35778 + low ----------------------------------------------> high
35779 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
35780 + ^----------------^
35781 + allow copies only within here
35782 + */
35783 + while (stack <= frame && frame < stackend) {
35784 + /* if obj + len extends past the last frame, this
35785 + check won't pass and the next frame will be 0,
35786 + causing us to bail out and correctly report
35787 + the copy as invalid
35788 + */
35789 + if (obj + len <= frame)
35790 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
35791 + oldframe = frame;
35792 + frame = *(const void * const *)frame;
35793 + }
35794 + return -1;
35795 +#else
35796 + return 1;
35797 +#endif
35798 +}
35799 +
35800 +
35801 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
35802 +{
35803 + if (current->signal->curr_ip)
35804 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
35805 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
35806 + else
35807 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
35808 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
35809 + dump_stack();
35810 + gr_handle_kernel_exploit();
35811 + do_group_exit(SIGKILL);
35812 +}
35813 +#endif
35814 +
35815 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
35816 +void pax_track_stack(void)
35817 +{
35818 + unsigned long sp = (unsigned long)&sp;
35819 + if (sp < current_thread_info()->lowest_stack &&
35820 + sp > (unsigned long)task_stack_page(current))
35821 + current_thread_info()->lowest_stack = sp;
35822 +}
35823 +EXPORT_SYMBOL(pax_track_stack);
35824 +#endif
35825 +
35826 static int zap_process(struct task_struct *start, int exit_code)
35827 {
35828 struct task_struct *t;
35829 @@ -1969,17 +2260,17 @@ static void wait_for_dump_helpers(struct
35830 pipe = file->f_path.dentry->d_inode->i_pipe;
35831
35832 pipe_lock(pipe);
35833 - pipe->readers++;
35834 - pipe->writers--;
35835 + atomic_inc(&pipe->readers);
35836 + atomic_dec(&pipe->writers);
35837
35838 - while ((pipe->readers > 1) && (!signal_pending(current))) {
35839 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
35840 wake_up_interruptible_sync(&pipe->wait);
35841 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
35842 pipe_wait(pipe);
35843 }
35844
35845 - pipe->readers--;
35846 - pipe->writers++;
35847 + atomic_dec(&pipe->readers);
35848 + atomic_inc(&pipe->writers);
35849 pipe_unlock(pipe);
35850
35851 }
35852 @@ -2040,7 +2331,7 @@ void do_coredump(long signr, int exit_co
35853 int retval = 0;
35854 int flag = 0;
35855 int ispipe;
35856 - static atomic_t core_dump_count = ATOMIC_INIT(0);
35857 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
35858 struct coredump_params cprm = {
35859 .signr = signr,
35860 .regs = regs,
35861 @@ -2055,6 +2346,9 @@ void do_coredump(long signr, int exit_co
35862
35863 audit_core_dumps(signr);
35864
35865 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
35866 + gr_handle_brute_attach(current, cprm.mm_flags);
35867 +
35868 binfmt = mm->binfmt;
35869 if (!binfmt || !binfmt->core_dump)
35870 goto fail;
35871 @@ -2095,6 +2389,8 @@ void do_coredump(long signr, int exit_co
35872 goto fail_corename;
35873 }
35874
35875 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
35876 +
35877 if (ispipe) {
35878 int dump_count;
35879 char **helper_argv;
35880 @@ -2122,7 +2418,7 @@ void do_coredump(long signr, int exit_co
35881 }
35882 cprm.limit = RLIM_INFINITY;
35883
35884 - dump_count = atomic_inc_return(&core_dump_count);
35885 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
35886 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
35887 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
35888 task_tgid_vnr(current), current->comm);
35889 @@ -2192,7 +2488,7 @@ close_fail:
35890 filp_close(cprm.file, NULL);
35891 fail_dropcount:
35892 if (ispipe)
35893 - atomic_dec(&core_dump_count);
35894 + atomic_dec_unchecked(&core_dump_count);
35895 fail_unlock:
35896 kfree(cn.corename);
35897 fail_corename:
35898 diff -urNp linux-3.0.3/fs/ext2/balloc.c linux-3.0.3/fs/ext2/balloc.c
35899 --- linux-3.0.3/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
35900 +++ linux-3.0.3/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
35901 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
35902
35903 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
35904 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
35905 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
35906 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
35907 sbi->s_resuid != current_fsuid() &&
35908 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
35909 return 0;
35910 diff -urNp linux-3.0.3/fs/ext3/balloc.c linux-3.0.3/fs/ext3/balloc.c
35911 --- linux-3.0.3/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
35912 +++ linux-3.0.3/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
35913 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
35914
35915 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
35916 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
35917 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
35918 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
35919 sbi->s_resuid != current_fsuid() &&
35920 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
35921 return 0;
35922 diff -urNp linux-3.0.3/fs/ext4/balloc.c linux-3.0.3/fs/ext4/balloc.c
35923 --- linux-3.0.3/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
35924 +++ linux-3.0.3/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
35925 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
35926 /* Hm, nope. Are (enough) root reserved blocks available? */
35927 if (sbi->s_resuid == current_fsuid() ||
35928 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
35929 - capable(CAP_SYS_RESOURCE) ||
35930 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
35931 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
35932 + capable_nolog(CAP_SYS_RESOURCE)) {
35933
35934 if (free_blocks >= (nblocks + dirty_blocks))
35935 return 1;
35936 diff -urNp linux-3.0.3/fs/ext4/ext4.h linux-3.0.3/fs/ext4/ext4.h
35937 --- linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
35938 +++ linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
35939 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
35940 unsigned long s_mb_last_start;
35941
35942 /* stats for buddy allocator */
35943 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
35944 - atomic_t s_bal_success; /* we found long enough chunks */
35945 - atomic_t s_bal_allocated; /* in blocks */
35946 - atomic_t s_bal_ex_scanned; /* total extents scanned */
35947 - atomic_t s_bal_goals; /* goal hits */
35948 - atomic_t s_bal_breaks; /* too long searches */
35949 - atomic_t s_bal_2orders; /* 2^order hits */
35950 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
35951 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
35952 + atomic_unchecked_t s_bal_allocated; /* in blocks */
35953 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
35954 + atomic_unchecked_t s_bal_goals; /* goal hits */
35955 + atomic_unchecked_t s_bal_breaks; /* too long searches */
35956 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
35957 spinlock_t s_bal_lock;
35958 unsigned long s_mb_buddies_generated;
35959 unsigned long long s_mb_generation_time;
35960 - atomic_t s_mb_lost_chunks;
35961 - atomic_t s_mb_preallocated;
35962 - atomic_t s_mb_discarded;
35963 + atomic_unchecked_t s_mb_lost_chunks;
35964 + atomic_unchecked_t s_mb_preallocated;
35965 + atomic_unchecked_t s_mb_discarded;
35966 atomic_t s_lock_busy;
35967
35968 /* locality groups */
35969 diff -urNp linux-3.0.3/fs/ext4/mballoc.c linux-3.0.3/fs/ext4/mballoc.c
35970 --- linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
35971 +++ linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
35972 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
35973 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
35974
35975 if (EXT4_SB(sb)->s_mb_stats)
35976 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
35977 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
35978
35979 break;
35980 }
35981 @@ -2087,7 +2087,7 @@ repeat:
35982 ac->ac_status = AC_STATUS_CONTINUE;
35983 ac->ac_flags |= EXT4_MB_HINT_FIRST;
35984 cr = 3;
35985 - atomic_inc(&sbi->s_mb_lost_chunks);
35986 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
35987 goto repeat;
35988 }
35989 }
35990 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
35991 ext4_grpblk_t counters[16];
35992 } sg;
35993
35994 + pax_track_stack();
35995 +
35996 group--;
35997 if (group == 0)
35998 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
35999 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36000 if (sbi->s_mb_stats) {
36001 printk(KERN_INFO
36002 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36003 - atomic_read(&sbi->s_bal_allocated),
36004 - atomic_read(&sbi->s_bal_reqs),
36005 - atomic_read(&sbi->s_bal_success));
36006 + atomic_read_unchecked(&sbi->s_bal_allocated),
36007 + atomic_read_unchecked(&sbi->s_bal_reqs),
36008 + atomic_read_unchecked(&sbi->s_bal_success));
36009 printk(KERN_INFO
36010 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36011 "%u 2^N hits, %u breaks, %u lost\n",
36012 - atomic_read(&sbi->s_bal_ex_scanned),
36013 - atomic_read(&sbi->s_bal_goals),
36014 - atomic_read(&sbi->s_bal_2orders),
36015 - atomic_read(&sbi->s_bal_breaks),
36016 - atomic_read(&sbi->s_mb_lost_chunks));
36017 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36018 + atomic_read_unchecked(&sbi->s_bal_goals),
36019 + atomic_read_unchecked(&sbi->s_bal_2orders),
36020 + atomic_read_unchecked(&sbi->s_bal_breaks),
36021 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36022 printk(KERN_INFO
36023 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36024 sbi->s_mb_buddies_generated++,
36025 sbi->s_mb_generation_time);
36026 printk(KERN_INFO
36027 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36028 - atomic_read(&sbi->s_mb_preallocated),
36029 - atomic_read(&sbi->s_mb_discarded));
36030 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36031 + atomic_read_unchecked(&sbi->s_mb_discarded));
36032 }
36033
36034 free_percpu(sbi->s_locality_groups);
36035 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36036 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36037
36038 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36039 - atomic_inc(&sbi->s_bal_reqs);
36040 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36041 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36042 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36043 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36044 - atomic_inc(&sbi->s_bal_success);
36045 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36046 + atomic_inc_unchecked(&sbi->s_bal_success);
36047 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36048 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36049 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36050 - atomic_inc(&sbi->s_bal_goals);
36051 + atomic_inc_unchecked(&sbi->s_bal_goals);
36052 if (ac->ac_found > sbi->s_mb_max_to_scan)
36053 - atomic_inc(&sbi->s_bal_breaks);
36054 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36055 }
36056
36057 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36058 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36059 trace_ext4_mb_new_inode_pa(ac, pa);
36060
36061 ext4_mb_use_inode_pa(ac, pa);
36062 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36063 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36064
36065 ei = EXT4_I(ac->ac_inode);
36066 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36067 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36068 trace_ext4_mb_new_group_pa(ac, pa);
36069
36070 ext4_mb_use_group_pa(ac, pa);
36071 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36072 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36073
36074 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36075 lg = ac->ac_lg;
36076 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36077 * from the bitmap and continue.
36078 */
36079 }
36080 - atomic_add(free, &sbi->s_mb_discarded);
36081 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36082
36083 return err;
36084 }
36085 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36086 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36087 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36088 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36089 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36090 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36091 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36092
36093 return 0;
36094 diff -urNp linux-3.0.3/fs/fcntl.c linux-3.0.3/fs/fcntl.c
36095 --- linux-3.0.3/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36096 +++ linux-3.0.3/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36097 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36098 if (err)
36099 return err;
36100
36101 + if (gr_handle_chroot_fowner(pid, type))
36102 + return -ENOENT;
36103 + if (gr_check_protected_task_fowner(pid, type))
36104 + return -EACCES;
36105 +
36106 f_modown(filp, pid, type, force);
36107 return 0;
36108 }
36109 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36110 switch (cmd) {
36111 case F_DUPFD:
36112 case F_DUPFD_CLOEXEC:
36113 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36114 if (arg >= rlimit(RLIMIT_NOFILE))
36115 break;
36116 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36117 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36118 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36119 * is defined as O_NONBLOCK on some platforms and not on others.
36120 */
36121 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36122 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36123 O_RDONLY | O_WRONLY | O_RDWR |
36124 O_CREAT | O_EXCL | O_NOCTTY |
36125 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36126 __O_SYNC | O_DSYNC | FASYNC |
36127 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36128 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36129 - __FMODE_EXEC | O_PATH
36130 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36131 ));
36132
36133 fasync_cache = kmem_cache_create("fasync_cache",
36134 diff -urNp linux-3.0.3/fs/fifo.c linux-3.0.3/fs/fifo.c
36135 --- linux-3.0.3/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36136 +++ linux-3.0.3/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36137 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36138 */
36139 filp->f_op = &read_pipefifo_fops;
36140 pipe->r_counter++;
36141 - if (pipe->readers++ == 0)
36142 + if (atomic_inc_return(&pipe->readers) == 1)
36143 wake_up_partner(inode);
36144
36145 - if (!pipe->writers) {
36146 + if (!atomic_read(&pipe->writers)) {
36147 if ((filp->f_flags & O_NONBLOCK)) {
36148 /* suppress POLLHUP until we have
36149 * seen a writer */
36150 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36151 * errno=ENXIO when there is no process reading the FIFO.
36152 */
36153 ret = -ENXIO;
36154 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36155 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36156 goto err;
36157
36158 filp->f_op = &write_pipefifo_fops;
36159 pipe->w_counter++;
36160 - if (!pipe->writers++)
36161 + if (atomic_inc_return(&pipe->writers) == 1)
36162 wake_up_partner(inode);
36163
36164 - if (!pipe->readers) {
36165 + if (!atomic_read(&pipe->readers)) {
36166 wait_for_partner(inode, &pipe->r_counter);
36167 if (signal_pending(current))
36168 goto err_wr;
36169 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36170 */
36171 filp->f_op = &rdwr_pipefifo_fops;
36172
36173 - pipe->readers++;
36174 - pipe->writers++;
36175 + atomic_inc(&pipe->readers);
36176 + atomic_inc(&pipe->writers);
36177 pipe->r_counter++;
36178 pipe->w_counter++;
36179 - if (pipe->readers == 1 || pipe->writers == 1)
36180 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36181 wake_up_partner(inode);
36182 break;
36183
36184 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36185 return 0;
36186
36187 err_rd:
36188 - if (!--pipe->readers)
36189 + if (atomic_dec_and_test(&pipe->readers))
36190 wake_up_interruptible(&pipe->wait);
36191 ret = -ERESTARTSYS;
36192 goto err;
36193
36194 err_wr:
36195 - if (!--pipe->writers)
36196 + if (atomic_dec_and_test(&pipe->writers))
36197 wake_up_interruptible(&pipe->wait);
36198 ret = -ERESTARTSYS;
36199 goto err;
36200
36201 err:
36202 - if (!pipe->readers && !pipe->writers)
36203 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36204 free_pipe_info(inode);
36205
36206 err_nocleanup:
36207 diff -urNp linux-3.0.3/fs/file.c linux-3.0.3/fs/file.c
36208 --- linux-3.0.3/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36209 +++ linux-3.0.3/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36210 @@ -15,6 +15,7 @@
36211 #include <linux/slab.h>
36212 #include <linux/vmalloc.h>
36213 #include <linux/file.h>
36214 +#include <linux/security.h>
36215 #include <linux/fdtable.h>
36216 #include <linux/bitops.h>
36217 #include <linux/interrupt.h>
36218 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36219 * N.B. For clone tasks sharing a files structure, this test
36220 * will limit the total number of files that can be opened.
36221 */
36222 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36223 if (nr >= rlimit(RLIMIT_NOFILE))
36224 return -EMFILE;
36225
36226 diff -urNp linux-3.0.3/fs/filesystems.c linux-3.0.3/fs/filesystems.c
36227 --- linux-3.0.3/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36228 +++ linux-3.0.3/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36229 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36230 int len = dot ? dot - name : strlen(name);
36231
36232 fs = __get_fs_type(name, len);
36233 +
36234 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36235 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36236 +#else
36237 if (!fs && (request_module("%.*s", len, name) == 0))
36238 +#endif
36239 fs = __get_fs_type(name, len);
36240
36241 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36242 diff -urNp linux-3.0.3/fs/fscache/cookie.c linux-3.0.3/fs/fscache/cookie.c
36243 --- linux-3.0.3/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36244 +++ linux-3.0.3/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36245 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36246 parent ? (char *) parent->def->name : "<no-parent>",
36247 def->name, netfs_data);
36248
36249 - fscache_stat(&fscache_n_acquires);
36250 + fscache_stat_unchecked(&fscache_n_acquires);
36251
36252 /* if there's no parent cookie, then we don't create one here either */
36253 if (!parent) {
36254 - fscache_stat(&fscache_n_acquires_null);
36255 + fscache_stat_unchecked(&fscache_n_acquires_null);
36256 _leave(" [no parent]");
36257 return NULL;
36258 }
36259 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36260 /* allocate and initialise a cookie */
36261 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36262 if (!cookie) {
36263 - fscache_stat(&fscache_n_acquires_oom);
36264 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36265 _leave(" [ENOMEM]");
36266 return NULL;
36267 }
36268 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36269
36270 switch (cookie->def->type) {
36271 case FSCACHE_COOKIE_TYPE_INDEX:
36272 - fscache_stat(&fscache_n_cookie_index);
36273 + fscache_stat_unchecked(&fscache_n_cookie_index);
36274 break;
36275 case FSCACHE_COOKIE_TYPE_DATAFILE:
36276 - fscache_stat(&fscache_n_cookie_data);
36277 + fscache_stat_unchecked(&fscache_n_cookie_data);
36278 break;
36279 default:
36280 - fscache_stat(&fscache_n_cookie_special);
36281 + fscache_stat_unchecked(&fscache_n_cookie_special);
36282 break;
36283 }
36284
36285 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36286 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36287 atomic_dec(&parent->n_children);
36288 __fscache_cookie_put(cookie);
36289 - fscache_stat(&fscache_n_acquires_nobufs);
36290 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36291 _leave(" = NULL");
36292 return NULL;
36293 }
36294 }
36295
36296 - fscache_stat(&fscache_n_acquires_ok);
36297 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36298 _leave(" = %p", cookie);
36299 return cookie;
36300 }
36301 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36302 cache = fscache_select_cache_for_object(cookie->parent);
36303 if (!cache) {
36304 up_read(&fscache_addremove_sem);
36305 - fscache_stat(&fscache_n_acquires_no_cache);
36306 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36307 _leave(" = -ENOMEDIUM [no cache]");
36308 return -ENOMEDIUM;
36309 }
36310 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36311 object = cache->ops->alloc_object(cache, cookie);
36312 fscache_stat_d(&fscache_n_cop_alloc_object);
36313 if (IS_ERR(object)) {
36314 - fscache_stat(&fscache_n_object_no_alloc);
36315 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36316 ret = PTR_ERR(object);
36317 goto error;
36318 }
36319
36320 - fscache_stat(&fscache_n_object_alloc);
36321 + fscache_stat_unchecked(&fscache_n_object_alloc);
36322
36323 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36324
36325 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36326 struct fscache_object *object;
36327 struct hlist_node *_p;
36328
36329 - fscache_stat(&fscache_n_updates);
36330 + fscache_stat_unchecked(&fscache_n_updates);
36331
36332 if (!cookie) {
36333 - fscache_stat(&fscache_n_updates_null);
36334 + fscache_stat_unchecked(&fscache_n_updates_null);
36335 _leave(" [no cookie]");
36336 return;
36337 }
36338 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36339 struct fscache_object *object;
36340 unsigned long event;
36341
36342 - fscache_stat(&fscache_n_relinquishes);
36343 + fscache_stat_unchecked(&fscache_n_relinquishes);
36344 if (retire)
36345 - fscache_stat(&fscache_n_relinquishes_retire);
36346 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36347
36348 if (!cookie) {
36349 - fscache_stat(&fscache_n_relinquishes_null);
36350 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36351 _leave(" [no cookie]");
36352 return;
36353 }
36354 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36355
36356 /* wait for the cookie to finish being instantiated (or to fail) */
36357 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36358 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36359 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36360 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36361 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36362 }
36363 diff -urNp linux-3.0.3/fs/fscache/internal.h linux-3.0.3/fs/fscache/internal.h
36364 --- linux-3.0.3/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36365 +++ linux-3.0.3/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36366 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36367 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36368 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36369
36370 -extern atomic_t fscache_n_op_pend;
36371 -extern atomic_t fscache_n_op_run;
36372 -extern atomic_t fscache_n_op_enqueue;
36373 -extern atomic_t fscache_n_op_deferred_release;
36374 -extern atomic_t fscache_n_op_release;
36375 -extern atomic_t fscache_n_op_gc;
36376 -extern atomic_t fscache_n_op_cancelled;
36377 -extern atomic_t fscache_n_op_rejected;
36378 -
36379 -extern atomic_t fscache_n_attr_changed;
36380 -extern atomic_t fscache_n_attr_changed_ok;
36381 -extern atomic_t fscache_n_attr_changed_nobufs;
36382 -extern atomic_t fscache_n_attr_changed_nomem;
36383 -extern atomic_t fscache_n_attr_changed_calls;
36384 -
36385 -extern atomic_t fscache_n_allocs;
36386 -extern atomic_t fscache_n_allocs_ok;
36387 -extern atomic_t fscache_n_allocs_wait;
36388 -extern atomic_t fscache_n_allocs_nobufs;
36389 -extern atomic_t fscache_n_allocs_intr;
36390 -extern atomic_t fscache_n_allocs_object_dead;
36391 -extern atomic_t fscache_n_alloc_ops;
36392 -extern atomic_t fscache_n_alloc_op_waits;
36393 -
36394 -extern atomic_t fscache_n_retrievals;
36395 -extern atomic_t fscache_n_retrievals_ok;
36396 -extern atomic_t fscache_n_retrievals_wait;
36397 -extern atomic_t fscache_n_retrievals_nodata;
36398 -extern atomic_t fscache_n_retrievals_nobufs;
36399 -extern atomic_t fscache_n_retrievals_intr;
36400 -extern atomic_t fscache_n_retrievals_nomem;
36401 -extern atomic_t fscache_n_retrievals_object_dead;
36402 -extern atomic_t fscache_n_retrieval_ops;
36403 -extern atomic_t fscache_n_retrieval_op_waits;
36404 -
36405 -extern atomic_t fscache_n_stores;
36406 -extern atomic_t fscache_n_stores_ok;
36407 -extern atomic_t fscache_n_stores_again;
36408 -extern atomic_t fscache_n_stores_nobufs;
36409 -extern atomic_t fscache_n_stores_oom;
36410 -extern atomic_t fscache_n_store_ops;
36411 -extern atomic_t fscache_n_store_calls;
36412 -extern atomic_t fscache_n_store_pages;
36413 -extern atomic_t fscache_n_store_radix_deletes;
36414 -extern atomic_t fscache_n_store_pages_over_limit;
36415 -
36416 -extern atomic_t fscache_n_store_vmscan_not_storing;
36417 -extern atomic_t fscache_n_store_vmscan_gone;
36418 -extern atomic_t fscache_n_store_vmscan_busy;
36419 -extern atomic_t fscache_n_store_vmscan_cancelled;
36420 -
36421 -extern atomic_t fscache_n_marks;
36422 -extern atomic_t fscache_n_uncaches;
36423 -
36424 -extern atomic_t fscache_n_acquires;
36425 -extern atomic_t fscache_n_acquires_null;
36426 -extern atomic_t fscache_n_acquires_no_cache;
36427 -extern atomic_t fscache_n_acquires_ok;
36428 -extern atomic_t fscache_n_acquires_nobufs;
36429 -extern atomic_t fscache_n_acquires_oom;
36430 -
36431 -extern atomic_t fscache_n_updates;
36432 -extern atomic_t fscache_n_updates_null;
36433 -extern atomic_t fscache_n_updates_run;
36434 -
36435 -extern atomic_t fscache_n_relinquishes;
36436 -extern atomic_t fscache_n_relinquishes_null;
36437 -extern atomic_t fscache_n_relinquishes_waitcrt;
36438 -extern atomic_t fscache_n_relinquishes_retire;
36439 -
36440 -extern atomic_t fscache_n_cookie_index;
36441 -extern atomic_t fscache_n_cookie_data;
36442 -extern atomic_t fscache_n_cookie_special;
36443 -
36444 -extern atomic_t fscache_n_object_alloc;
36445 -extern atomic_t fscache_n_object_no_alloc;
36446 -extern atomic_t fscache_n_object_lookups;
36447 -extern atomic_t fscache_n_object_lookups_negative;
36448 -extern atomic_t fscache_n_object_lookups_positive;
36449 -extern atomic_t fscache_n_object_lookups_timed_out;
36450 -extern atomic_t fscache_n_object_created;
36451 -extern atomic_t fscache_n_object_avail;
36452 -extern atomic_t fscache_n_object_dead;
36453 -
36454 -extern atomic_t fscache_n_checkaux_none;
36455 -extern atomic_t fscache_n_checkaux_okay;
36456 -extern atomic_t fscache_n_checkaux_update;
36457 -extern atomic_t fscache_n_checkaux_obsolete;
36458 +extern atomic_unchecked_t fscache_n_op_pend;
36459 +extern atomic_unchecked_t fscache_n_op_run;
36460 +extern atomic_unchecked_t fscache_n_op_enqueue;
36461 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36462 +extern atomic_unchecked_t fscache_n_op_release;
36463 +extern atomic_unchecked_t fscache_n_op_gc;
36464 +extern atomic_unchecked_t fscache_n_op_cancelled;
36465 +extern atomic_unchecked_t fscache_n_op_rejected;
36466 +
36467 +extern atomic_unchecked_t fscache_n_attr_changed;
36468 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36469 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36470 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36471 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36472 +
36473 +extern atomic_unchecked_t fscache_n_allocs;
36474 +extern atomic_unchecked_t fscache_n_allocs_ok;
36475 +extern atomic_unchecked_t fscache_n_allocs_wait;
36476 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36477 +extern atomic_unchecked_t fscache_n_allocs_intr;
36478 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36479 +extern atomic_unchecked_t fscache_n_alloc_ops;
36480 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36481 +
36482 +extern atomic_unchecked_t fscache_n_retrievals;
36483 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36484 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36485 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36486 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36487 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36488 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36489 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36490 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36491 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36492 +
36493 +extern atomic_unchecked_t fscache_n_stores;
36494 +extern atomic_unchecked_t fscache_n_stores_ok;
36495 +extern atomic_unchecked_t fscache_n_stores_again;
36496 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36497 +extern atomic_unchecked_t fscache_n_stores_oom;
36498 +extern atomic_unchecked_t fscache_n_store_ops;
36499 +extern atomic_unchecked_t fscache_n_store_calls;
36500 +extern atomic_unchecked_t fscache_n_store_pages;
36501 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36502 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36503 +
36504 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36505 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36506 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36507 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36508 +
36509 +extern atomic_unchecked_t fscache_n_marks;
36510 +extern atomic_unchecked_t fscache_n_uncaches;
36511 +
36512 +extern atomic_unchecked_t fscache_n_acquires;
36513 +extern atomic_unchecked_t fscache_n_acquires_null;
36514 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36515 +extern atomic_unchecked_t fscache_n_acquires_ok;
36516 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36517 +extern atomic_unchecked_t fscache_n_acquires_oom;
36518 +
36519 +extern atomic_unchecked_t fscache_n_updates;
36520 +extern atomic_unchecked_t fscache_n_updates_null;
36521 +extern atomic_unchecked_t fscache_n_updates_run;
36522 +
36523 +extern atomic_unchecked_t fscache_n_relinquishes;
36524 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36525 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36526 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36527 +
36528 +extern atomic_unchecked_t fscache_n_cookie_index;
36529 +extern atomic_unchecked_t fscache_n_cookie_data;
36530 +extern atomic_unchecked_t fscache_n_cookie_special;
36531 +
36532 +extern atomic_unchecked_t fscache_n_object_alloc;
36533 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36534 +extern atomic_unchecked_t fscache_n_object_lookups;
36535 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36536 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36537 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36538 +extern atomic_unchecked_t fscache_n_object_created;
36539 +extern atomic_unchecked_t fscache_n_object_avail;
36540 +extern atomic_unchecked_t fscache_n_object_dead;
36541 +
36542 +extern atomic_unchecked_t fscache_n_checkaux_none;
36543 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36544 +extern atomic_unchecked_t fscache_n_checkaux_update;
36545 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36546
36547 extern atomic_t fscache_n_cop_alloc_object;
36548 extern atomic_t fscache_n_cop_lookup_object;
36549 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36550 atomic_inc(stat);
36551 }
36552
36553 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36554 +{
36555 + atomic_inc_unchecked(stat);
36556 +}
36557 +
36558 static inline void fscache_stat_d(atomic_t *stat)
36559 {
36560 atomic_dec(stat);
36561 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36562
36563 #define __fscache_stat(stat) (NULL)
36564 #define fscache_stat(stat) do {} while (0)
36565 +#define fscache_stat_unchecked(stat) do {} while (0)
36566 #define fscache_stat_d(stat) do {} while (0)
36567 #endif
36568
36569 diff -urNp linux-3.0.3/fs/fscache/object.c linux-3.0.3/fs/fscache/object.c
36570 --- linux-3.0.3/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36571 +++ linux-3.0.3/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36572 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36573 /* update the object metadata on disk */
36574 case FSCACHE_OBJECT_UPDATING:
36575 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36576 - fscache_stat(&fscache_n_updates_run);
36577 + fscache_stat_unchecked(&fscache_n_updates_run);
36578 fscache_stat(&fscache_n_cop_update_object);
36579 object->cache->ops->update_object(object);
36580 fscache_stat_d(&fscache_n_cop_update_object);
36581 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36582 spin_lock(&object->lock);
36583 object->state = FSCACHE_OBJECT_DEAD;
36584 spin_unlock(&object->lock);
36585 - fscache_stat(&fscache_n_object_dead);
36586 + fscache_stat_unchecked(&fscache_n_object_dead);
36587 goto terminal_transit;
36588
36589 /* handle the parent cache of this object being withdrawn from
36590 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36591 spin_lock(&object->lock);
36592 object->state = FSCACHE_OBJECT_DEAD;
36593 spin_unlock(&object->lock);
36594 - fscache_stat(&fscache_n_object_dead);
36595 + fscache_stat_unchecked(&fscache_n_object_dead);
36596 goto terminal_transit;
36597
36598 /* complain about the object being woken up once it is
36599 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36600 parent->cookie->def->name, cookie->def->name,
36601 object->cache->tag->name);
36602
36603 - fscache_stat(&fscache_n_object_lookups);
36604 + fscache_stat_unchecked(&fscache_n_object_lookups);
36605 fscache_stat(&fscache_n_cop_lookup_object);
36606 ret = object->cache->ops->lookup_object(object);
36607 fscache_stat_d(&fscache_n_cop_lookup_object);
36608 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36609 if (ret == -ETIMEDOUT) {
36610 /* probably stuck behind another object, so move this one to
36611 * the back of the queue */
36612 - fscache_stat(&fscache_n_object_lookups_timed_out);
36613 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36614 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36615 }
36616
36617 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36618
36619 spin_lock(&object->lock);
36620 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36621 - fscache_stat(&fscache_n_object_lookups_negative);
36622 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36623
36624 /* transit here to allow write requests to begin stacking up
36625 * and read requests to begin returning ENODATA */
36626 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36627 * result, in which case there may be data available */
36628 spin_lock(&object->lock);
36629 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36630 - fscache_stat(&fscache_n_object_lookups_positive);
36631 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36632
36633 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36634
36635 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36636 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36637 } else {
36638 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36639 - fscache_stat(&fscache_n_object_created);
36640 + fscache_stat_unchecked(&fscache_n_object_created);
36641
36642 object->state = FSCACHE_OBJECT_AVAILABLE;
36643 spin_unlock(&object->lock);
36644 @@ -602,7 +602,7 @@ static void fscache_object_available(str
36645 fscache_enqueue_dependents(object);
36646
36647 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36648 - fscache_stat(&fscache_n_object_avail);
36649 + fscache_stat_unchecked(&fscache_n_object_avail);
36650
36651 _leave("");
36652 }
36653 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36654 enum fscache_checkaux result;
36655
36656 if (!object->cookie->def->check_aux) {
36657 - fscache_stat(&fscache_n_checkaux_none);
36658 + fscache_stat_unchecked(&fscache_n_checkaux_none);
36659 return FSCACHE_CHECKAUX_OKAY;
36660 }
36661
36662 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36663 switch (result) {
36664 /* entry okay as is */
36665 case FSCACHE_CHECKAUX_OKAY:
36666 - fscache_stat(&fscache_n_checkaux_okay);
36667 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
36668 break;
36669
36670 /* entry requires update */
36671 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36672 - fscache_stat(&fscache_n_checkaux_update);
36673 + fscache_stat_unchecked(&fscache_n_checkaux_update);
36674 break;
36675
36676 /* entry requires deletion */
36677 case FSCACHE_CHECKAUX_OBSOLETE:
36678 - fscache_stat(&fscache_n_checkaux_obsolete);
36679 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36680 break;
36681
36682 default:
36683 diff -urNp linux-3.0.3/fs/fscache/operation.c linux-3.0.3/fs/fscache/operation.c
36684 --- linux-3.0.3/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
36685 +++ linux-3.0.3/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
36686 @@ -17,7 +17,7 @@
36687 #include <linux/slab.h>
36688 #include "internal.h"
36689
36690 -atomic_t fscache_op_debug_id;
36691 +atomic_unchecked_t fscache_op_debug_id;
36692 EXPORT_SYMBOL(fscache_op_debug_id);
36693
36694 /**
36695 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
36696 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36697 ASSERTCMP(atomic_read(&op->usage), >, 0);
36698
36699 - fscache_stat(&fscache_n_op_enqueue);
36700 + fscache_stat_unchecked(&fscache_n_op_enqueue);
36701 switch (op->flags & FSCACHE_OP_TYPE) {
36702 case FSCACHE_OP_ASYNC:
36703 _debug("queue async");
36704 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
36705 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
36706 if (op->processor)
36707 fscache_enqueue_operation(op);
36708 - fscache_stat(&fscache_n_op_run);
36709 + fscache_stat_unchecked(&fscache_n_op_run);
36710 }
36711
36712 /*
36713 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
36714 if (object->n_ops > 1) {
36715 atomic_inc(&op->usage);
36716 list_add_tail(&op->pend_link, &object->pending_ops);
36717 - fscache_stat(&fscache_n_op_pend);
36718 + fscache_stat_unchecked(&fscache_n_op_pend);
36719 } else if (!list_empty(&object->pending_ops)) {
36720 atomic_inc(&op->usage);
36721 list_add_tail(&op->pend_link, &object->pending_ops);
36722 - fscache_stat(&fscache_n_op_pend);
36723 + fscache_stat_unchecked(&fscache_n_op_pend);
36724 fscache_start_operations(object);
36725 } else {
36726 ASSERTCMP(object->n_in_progress, ==, 0);
36727 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
36728 object->n_exclusive++; /* reads and writes must wait */
36729 atomic_inc(&op->usage);
36730 list_add_tail(&op->pend_link, &object->pending_ops);
36731 - fscache_stat(&fscache_n_op_pend);
36732 + fscache_stat_unchecked(&fscache_n_op_pend);
36733 ret = 0;
36734 } else {
36735 /* not allowed to submit ops in any other state */
36736 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
36737 if (object->n_exclusive > 0) {
36738 atomic_inc(&op->usage);
36739 list_add_tail(&op->pend_link, &object->pending_ops);
36740 - fscache_stat(&fscache_n_op_pend);
36741 + fscache_stat_unchecked(&fscache_n_op_pend);
36742 } else if (!list_empty(&object->pending_ops)) {
36743 atomic_inc(&op->usage);
36744 list_add_tail(&op->pend_link, &object->pending_ops);
36745 - fscache_stat(&fscache_n_op_pend);
36746 + fscache_stat_unchecked(&fscache_n_op_pend);
36747 fscache_start_operations(object);
36748 } else {
36749 ASSERTCMP(object->n_exclusive, ==, 0);
36750 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
36751 object->n_ops++;
36752 atomic_inc(&op->usage);
36753 list_add_tail(&op->pend_link, &object->pending_ops);
36754 - fscache_stat(&fscache_n_op_pend);
36755 + fscache_stat_unchecked(&fscache_n_op_pend);
36756 ret = 0;
36757 } else if (object->state == FSCACHE_OBJECT_DYING ||
36758 object->state == FSCACHE_OBJECT_LC_DYING ||
36759 object->state == FSCACHE_OBJECT_WITHDRAWING) {
36760 - fscache_stat(&fscache_n_op_rejected);
36761 + fscache_stat_unchecked(&fscache_n_op_rejected);
36762 ret = -ENOBUFS;
36763 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
36764 fscache_report_unexpected_submission(object, op, ostate);
36765 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
36766
36767 ret = -EBUSY;
36768 if (!list_empty(&op->pend_link)) {
36769 - fscache_stat(&fscache_n_op_cancelled);
36770 + fscache_stat_unchecked(&fscache_n_op_cancelled);
36771 list_del_init(&op->pend_link);
36772 object->n_ops--;
36773 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
36774 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
36775 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
36776 BUG();
36777
36778 - fscache_stat(&fscache_n_op_release);
36779 + fscache_stat_unchecked(&fscache_n_op_release);
36780
36781 if (op->release) {
36782 op->release(op);
36783 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
36784 * lock, and defer it otherwise */
36785 if (!spin_trylock(&object->lock)) {
36786 _debug("defer put");
36787 - fscache_stat(&fscache_n_op_deferred_release);
36788 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
36789
36790 cache = object->cache;
36791 spin_lock(&cache->op_gc_list_lock);
36792 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
36793
36794 _debug("GC DEFERRED REL OBJ%x OP%x",
36795 object->debug_id, op->debug_id);
36796 - fscache_stat(&fscache_n_op_gc);
36797 + fscache_stat_unchecked(&fscache_n_op_gc);
36798
36799 ASSERTCMP(atomic_read(&op->usage), ==, 0);
36800
36801 diff -urNp linux-3.0.3/fs/fscache/page.c linux-3.0.3/fs/fscache/page.c
36802 --- linux-3.0.3/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
36803 +++ linux-3.0.3/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
36804 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
36805 val = radix_tree_lookup(&cookie->stores, page->index);
36806 if (!val) {
36807 rcu_read_unlock();
36808 - fscache_stat(&fscache_n_store_vmscan_not_storing);
36809 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
36810 __fscache_uncache_page(cookie, page);
36811 return true;
36812 }
36813 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
36814 spin_unlock(&cookie->stores_lock);
36815
36816 if (xpage) {
36817 - fscache_stat(&fscache_n_store_vmscan_cancelled);
36818 - fscache_stat(&fscache_n_store_radix_deletes);
36819 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
36820 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
36821 ASSERTCMP(xpage, ==, page);
36822 } else {
36823 - fscache_stat(&fscache_n_store_vmscan_gone);
36824 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
36825 }
36826
36827 wake_up_bit(&cookie->flags, 0);
36828 @@ -107,7 +107,7 @@ page_busy:
36829 /* we might want to wait here, but that could deadlock the allocator as
36830 * the work threads writing to the cache may all end up sleeping
36831 * on memory allocation */
36832 - fscache_stat(&fscache_n_store_vmscan_busy);
36833 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
36834 return false;
36835 }
36836 EXPORT_SYMBOL(__fscache_maybe_release_page);
36837 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
36838 FSCACHE_COOKIE_STORING_TAG);
36839 if (!radix_tree_tag_get(&cookie->stores, page->index,
36840 FSCACHE_COOKIE_PENDING_TAG)) {
36841 - fscache_stat(&fscache_n_store_radix_deletes);
36842 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
36843 xpage = radix_tree_delete(&cookie->stores, page->index);
36844 }
36845 spin_unlock(&cookie->stores_lock);
36846 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
36847
36848 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
36849
36850 - fscache_stat(&fscache_n_attr_changed_calls);
36851 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
36852
36853 if (fscache_object_is_active(object)) {
36854 fscache_stat(&fscache_n_cop_attr_changed);
36855 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
36856
36857 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
36858
36859 - fscache_stat(&fscache_n_attr_changed);
36860 + fscache_stat_unchecked(&fscache_n_attr_changed);
36861
36862 op = kzalloc(sizeof(*op), GFP_KERNEL);
36863 if (!op) {
36864 - fscache_stat(&fscache_n_attr_changed_nomem);
36865 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
36866 _leave(" = -ENOMEM");
36867 return -ENOMEM;
36868 }
36869 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
36870 if (fscache_submit_exclusive_op(object, op) < 0)
36871 goto nobufs;
36872 spin_unlock(&cookie->lock);
36873 - fscache_stat(&fscache_n_attr_changed_ok);
36874 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
36875 fscache_put_operation(op);
36876 _leave(" = 0");
36877 return 0;
36878 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
36879 nobufs:
36880 spin_unlock(&cookie->lock);
36881 kfree(op);
36882 - fscache_stat(&fscache_n_attr_changed_nobufs);
36883 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
36884 _leave(" = %d", -ENOBUFS);
36885 return -ENOBUFS;
36886 }
36887 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
36888 /* allocate a retrieval operation and attempt to submit it */
36889 op = kzalloc(sizeof(*op), GFP_NOIO);
36890 if (!op) {
36891 - fscache_stat(&fscache_n_retrievals_nomem);
36892 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
36893 return NULL;
36894 }
36895
36896 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
36897 return 0;
36898 }
36899
36900 - fscache_stat(&fscache_n_retrievals_wait);
36901 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
36902
36903 jif = jiffies;
36904 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
36905 fscache_wait_bit_interruptible,
36906 TASK_INTERRUPTIBLE) != 0) {
36907 - fscache_stat(&fscache_n_retrievals_intr);
36908 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
36909 _leave(" = -ERESTARTSYS");
36910 return -ERESTARTSYS;
36911 }
36912 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
36913 */
36914 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
36915 struct fscache_retrieval *op,
36916 - atomic_t *stat_op_waits,
36917 - atomic_t *stat_object_dead)
36918 + atomic_unchecked_t *stat_op_waits,
36919 + atomic_unchecked_t *stat_object_dead)
36920 {
36921 int ret;
36922
36923 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
36924 goto check_if_dead;
36925
36926 _debug(">>> WT");
36927 - fscache_stat(stat_op_waits);
36928 + fscache_stat_unchecked(stat_op_waits);
36929 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
36930 fscache_wait_bit_interruptible,
36931 TASK_INTERRUPTIBLE) < 0) {
36932 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
36933
36934 check_if_dead:
36935 if (unlikely(fscache_object_is_dead(object))) {
36936 - fscache_stat(stat_object_dead);
36937 + fscache_stat_unchecked(stat_object_dead);
36938 return -ENOBUFS;
36939 }
36940 return 0;
36941 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
36942
36943 _enter("%p,%p,,,", cookie, page);
36944
36945 - fscache_stat(&fscache_n_retrievals);
36946 + fscache_stat_unchecked(&fscache_n_retrievals);
36947
36948 if (hlist_empty(&cookie->backing_objects))
36949 goto nobufs;
36950 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
36951 goto nobufs_unlock;
36952 spin_unlock(&cookie->lock);
36953
36954 - fscache_stat(&fscache_n_retrieval_ops);
36955 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
36956
36957 /* pin the netfs read context in case we need to do the actual netfs
36958 * read because we've encountered a cache read failure */
36959 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
36960
36961 error:
36962 if (ret == -ENOMEM)
36963 - fscache_stat(&fscache_n_retrievals_nomem);
36964 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
36965 else if (ret == -ERESTARTSYS)
36966 - fscache_stat(&fscache_n_retrievals_intr);
36967 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
36968 else if (ret == -ENODATA)
36969 - fscache_stat(&fscache_n_retrievals_nodata);
36970 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
36971 else if (ret < 0)
36972 - fscache_stat(&fscache_n_retrievals_nobufs);
36973 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
36974 else
36975 - fscache_stat(&fscache_n_retrievals_ok);
36976 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
36977
36978 fscache_put_retrieval(op);
36979 _leave(" = %d", ret);
36980 @@ -429,7 +429,7 @@ nobufs_unlock:
36981 spin_unlock(&cookie->lock);
36982 kfree(op);
36983 nobufs:
36984 - fscache_stat(&fscache_n_retrievals_nobufs);
36985 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
36986 _leave(" = -ENOBUFS");
36987 return -ENOBUFS;
36988 }
36989 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
36990
36991 _enter("%p,,%d,,,", cookie, *nr_pages);
36992
36993 - fscache_stat(&fscache_n_retrievals);
36994 + fscache_stat_unchecked(&fscache_n_retrievals);
36995
36996 if (hlist_empty(&cookie->backing_objects))
36997 goto nobufs;
36998 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
36999 goto nobufs_unlock;
37000 spin_unlock(&cookie->lock);
37001
37002 - fscache_stat(&fscache_n_retrieval_ops);
37003 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37004
37005 /* pin the netfs read context in case we need to do the actual netfs
37006 * read because we've encountered a cache read failure */
37007 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37008
37009 error:
37010 if (ret == -ENOMEM)
37011 - fscache_stat(&fscache_n_retrievals_nomem);
37012 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37013 else if (ret == -ERESTARTSYS)
37014 - fscache_stat(&fscache_n_retrievals_intr);
37015 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37016 else if (ret == -ENODATA)
37017 - fscache_stat(&fscache_n_retrievals_nodata);
37018 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37019 else if (ret < 0)
37020 - fscache_stat(&fscache_n_retrievals_nobufs);
37021 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37022 else
37023 - fscache_stat(&fscache_n_retrievals_ok);
37024 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37025
37026 fscache_put_retrieval(op);
37027 _leave(" = %d", ret);
37028 @@ -545,7 +545,7 @@ nobufs_unlock:
37029 spin_unlock(&cookie->lock);
37030 kfree(op);
37031 nobufs:
37032 - fscache_stat(&fscache_n_retrievals_nobufs);
37033 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37034 _leave(" = -ENOBUFS");
37035 return -ENOBUFS;
37036 }
37037 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37038
37039 _enter("%p,%p,,,", cookie, page);
37040
37041 - fscache_stat(&fscache_n_allocs);
37042 + fscache_stat_unchecked(&fscache_n_allocs);
37043
37044 if (hlist_empty(&cookie->backing_objects))
37045 goto nobufs;
37046 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37047 goto nobufs_unlock;
37048 spin_unlock(&cookie->lock);
37049
37050 - fscache_stat(&fscache_n_alloc_ops);
37051 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37052
37053 ret = fscache_wait_for_retrieval_activation(
37054 object, op,
37055 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37056
37057 error:
37058 if (ret == -ERESTARTSYS)
37059 - fscache_stat(&fscache_n_allocs_intr);
37060 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37061 else if (ret < 0)
37062 - fscache_stat(&fscache_n_allocs_nobufs);
37063 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37064 else
37065 - fscache_stat(&fscache_n_allocs_ok);
37066 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37067
37068 fscache_put_retrieval(op);
37069 _leave(" = %d", ret);
37070 @@ -625,7 +625,7 @@ nobufs_unlock:
37071 spin_unlock(&cookie->lock);
37072 kfree(op);
37073 nobufs:
37074 - fscache_stat(&fscache_n_allocs_nobufs);
37075 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37076 _leave(" = -ENOBUFS");
37077 return -ENOBUFS;
37078 }
37079 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37080
37081 spin_lock(&cookie->stores_lock);
37082
37083 - fscache_stat(&fscache_n_store_calls);
37084 + fscache_stat_unchecked(&fscache_n_store_calls);
37085
37086 /* find a page to store */
37087 page = NULL;
37088 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37089 page = results[0];
37090 _debug("gang %d [%lx]", n, page->index);
37091 if (page->index > op->store_limit) {
37092 - fscache_stat(&fscache_n_store_pages_over_limit);
37093 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37094 goto superseded;
37095 }
37096
37097 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37098 spin_unlock(&cookie->stores_lock);
37099 spin_unlock(&object->lock);
37100
37101 - fscache_stat(&fscache_n_store_pages);
37102 + fscache_stat_unchecked(&fscache_n_store_pages);
37103 fscache_stat(&fscache_n_cop_write_page);
37104 ret = object->cache->ops->write_page(op, page);
37105 fscache_stat_d(&fscache_n_cop_write_page);
37106 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37107 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37108 ASSERT(PageFsCache(page));
37109
37110 - fscache_stat(&fscache_n_stores);
37111 + fscache_stat_unchecked(&fscache_n_stores);
37112
37113 op = kzalloc(sizeof(*op), GFP_NOIO);
37114 if (!op)
37115 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37116 spin_unlock(&cookie->stores_lock);
37117 spin_unlock(&object->lock);
37118
37119 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37120 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37121 op->store_limit = object->store_limit;
37122
37123 if (fscache_submit_op(object, &op->op) < 0)
37124 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37125
37126 spin_unlock(&cookie->lock);
37127 radix_tree_preload_end();
37128 - fscache_stat(&fscache_n_store_ops);
37129 - fscache_stat(&fscache_n_stores_ok);
37130 + fscache_stat_unchecked(&fscache_n_store_ops);
37131 + fscache_stat_unchecked(&fscache_n_stores_ok);
37132
37133 /* the work queue now carries its own ref on the object */
37134 fscache_put_operation(&op->op);
37135 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37136 return 0;
37137
37138 already_queued:
37139 - fscache_stat(&fscache_n_stores_again);
37140 + fscache_stat_unchecked(&fscache_n_stores_again);
37141 already_pending:
37142 spin_unlock(&cookie->stores_lock);
37143 spin_unlock(&object->lock);
37144 spin_unlock(&cookie->lock);
37145 radix_tree_preload_end();
37146 kfree(op);
37147 - fscache_stat(&fscache_n_stores_ok);
37148 + fscache_stat_unchecked(&fscache_n_stores_ok);
37149 _leave(" = 0");
37150 return 0;
37151
37152 @@ -851,14 +851,14 @@ nobufs:
37153 spin_unlock(&cookie->lock);
37154 radix_tree_preload_end();
37155 kfree(op);
37156 - fscache_stat(&fscache_n_stores_nobufs);
37157 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37158 _leave(" = -ENOBUFS");
37159 return -ENOBUFS;
37160
37161 nomem_free:
37162 kfree(op);
37163 nomem:
37164 - fscache_stat(&fscache_n_stores_oom);
37165 + fscache_stat_unchecked(&fscache_n_stores_oom);
37166 _leave(" = -ENOMEM");
37167 return -ENOMEM;
37168 }
37169 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37170 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37171 ASSERTCMP(page, !=, NULL);
37172
37173 - fscache_stat(&fscache_n_uncaches);
37174 + fscache_stat_unchecked(&fscache_n_uncaches);
37175
37176 /* cache withdrawal may beat us to it */
37177 if (!PageFsCache(page))
37178 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37179 unsigned long loop;
37180
37181 #ifdef CONFIG_FSCACHE_STATS
37182 - atomic_add(pagevec->nr, &fscache_n_marks);
37183 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37184 #endif
37185
37186 for (loop = 0; loop < pagevec->nr; loop++) {
37187 diff -urNp linux-3.0.3/fs/fscache/stats.c linux-3.0.3/fs/fscache/stats.c
37188 --- linux-3.0.3/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37189 +++ linux-3.0.3/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37190 @@ -18,95 +18,95 @@
37191 /*
37192 * operation counters
37193 */
37194 -atomic_t fscache_n_op_pend;
37195 -atomic_t fscache_n_op_run;
37196 -atomic_t fscache_n_op_enqueue;
37197 -atomic_t fscache_n_op_requeue;
37198 -atomic_t fscache_n_op_deferred_release;
37199 -atomic_t fscache_n_op_release;
37200 -atomic_t fscache_n_op_gc;
37201 -atomic_t fscache_n_op_cancelled;
37202 -atomic_t fscache_n_op_rejected;
37203 -
37204 -atomic_t fscache_n_attr_changed;
37205 -atomic_t fscache_n_attr_changed_ok;
37206 -atomic_t fscache_n_attr_changed_nobufs;
37207 -atomic_t fscache_n_attr_changed_nomem;
37208 -atomic_t fscache_n_attr_changed_calls;
37209 -
37210 -atomic_t fscache_n_allocs;
37211 -atomic_t fscache_n_allocs_ok;
37212 -atomic_t fscache_n_allocs_wait;
37213 -atomic_t fscache_n_allocs_nobufs;
37214 -atomic_t fscache_n_allocs_intr;
37215 -atomic_t fscache_n_allocs_object_dead;
37216 -atomic_t fscache_n_alloc_ops;
37217 -atomic_t fscache_n_alloc_op_waits;
37218 -
37219 -atomic_t fscache_n_retrievals;
37220 -atomic_t fscache_n_retrievals_ok;
37221 -atomic_t fscache_n_retrievals_wait;
37222 -atomic_t fscache_n_retrievals_nodata;
37223 -atomic_t fscache_n_retrievals_nobufs;
37224 -atomic_t fscache_n_retrievals_intr;
37225 -atomic_t fscache_n_retrievals_nomem;
37226 -atomic_t fscache_n_retrievals_object_dead;
37227 -atomic_t fscache_n_retrieval_ops;
37228 -atomic_t fscache_n_retrieval_op_waits;
37229 -
37230 -atomic_t fscache_n_stores;
37231 -atomic_t fscache_n_stores_ok;
37232 -atomic_t fscache_n_stores_again;
37233 -atomic_t fscache_n_stores_nobufs;
37234 -atomic_t fscache_n_stores_oom;
37235 -atomic_t fscache_n_store_ops;
37236 -atomic_t fscache_n_store_calls;
37237 -atomic_t fscache_n_store_pages;
37238 -atomic_t fscache_n_store_radix_deletes;
37239 -atomic_t fscache_n_store_pages_over_limit;
37240 -
37241 -atomic_t fscache_n_store_vmscan_not_storing;
37242 -atomic_t fscache_n_store_vmscan_gone;
37243 -atomic_t fscache_n_store_vmscan_busy;
37244 -atomic_t fscache_n_store_vmscan_cancelled;
37245 -
37246 -atomic_t fscache_n_marks;
37247 -atomic_t fscache_n_uncaches;
37248 -
37249 -atomic_t fscache_n_acquires;
37250 -atomic_t fscache_n_acquires_null;
37251 -atomic_t fscache_n_acquires_no_cache;
37252 -atomic_t fscache_n_acquires_ok;
37253 -atomic_t fscache_n_acquires_nobufs;
37254 -atomic_t fscache_n_acquires_oom;
37255 -
37256 -atomic_t fscache_n_updates;
37257 -atomic_t fscache_n_updates_null;
37258 -atomic_t fscache_n_updates_run;
37259 -
37260 -atomic_t fscache_n_relinquishes;
37261 -atomic_t fscache_n_relinquishes_null;
37262 -atomic_t fscache_n_relinquishes_waitcrt;
37263 -atomic_t fscache_n_relinquishes_retire;
37264 -
37265 -atomic_t fscache_n_cookie_index;
37266 -atomic_t fscache_n_cookie_data;
37267 -atomic_t fscache_n_cookie_special;
37268 -
37269 -atomic_t fscache_n_object_alloc;
37270 -atomic_t fscache_n_object_no_alloc;
37271 -atomic_t fscache_n_object_lookups;
37272 -atomic_t fscache_n_object_lookups_negative;
37273 -atomic_t fscache_n_object_lookups_positive;
37274 -atomic_t fscache_n_object_lookups_timed_out;
37275 -atomic_t fscache_n_object_created;
37276 -atomic_t fscache_n_object_avail;
37277 -atomic_t fscache_n_object_dead;
37278 -
37279 -atomic_t fscache_n_checkaux_none;
37280 -atomic_t fscache_n_checkaux_okay;
37281 -atomic_t fscache_n_checkaux_update;
37282 -atomic_t fscache_n_checkaux_obsolete;
37283 +atomic_unchecked_t fscache_n_op_pend;
37284 +atomic_unchecked_t fscache_n_op_run;
37285 +atomic_unchecked_t fscache_n_op_enqueue;
37286 +atomic_unchecked_t fscache_n_op_requeue;
37287 +atomic_unchecked_t fscache_n_op_deferred_release;
37288 +atomic_unchecked_t fscache_n_op_release;
37289 +atomic_unchecked_t fscache_n_op_gc;
37290 +atomic_unchecked_t fscache_n_op_cancelled;
37291 +atomic_unchecked_t fscache_n_op_rejected;
37292 +
37293 +atomic_unchecked_t fscache_n_attr_changed;
37294 +atomic_unchecked_t fscache_n_attr_changed_ok;
37295 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37296 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37297 +atomic_unchecked_t fscache_n_attr_changed_calls;
37298 +
37299 +atomic_unchecked_t fscache_n_allocs;
37300 +atomic_unchecked_t fscache_n_allocs_ok;
37301 +atomic_unchecked_t fscache_n_allocs_wait;
37302 +atomic_unchecked_t fscache_n_allocs_nobufs;
37303 +atomic_unchecked_t fscache_n_allocs_intr;
37304 +atomic_unchecked_t fscache_n_allocs_object_dead;
37305 +atomic_unchecked_t fscache_n_alloc_ops;
37306 +atomic_unchecked_t fscache_n_alloc_op_waits;
37307 +
37308 +atomic_unchecked_t fscache_n_retrievals;
37309 +atomic_unchecked_t fscache_n_retrievals_ok;
37310 +atomic_unchecked_t fscache_n_retrievals_wait;
37311 +atomic_unchecked_t fscache_n_retrievals_nodata;
37312 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37313 +atomic_unchecked_t fscache_n_retrievals_intr;
37314 +atomic_unchecked_t fscache_n_retrievals_nomem;
37315 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37316 +atomic_unchecked_t fscache_n_retrieval_ops;
37317 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37318 +
37319 +atomic_unchecked_t fscache_n_stores;
37320 +atomic_unchecked_t fscache_n_stores_ok;
37321 +atomic_unchecked_t fscache_n_stores_again;
37322 +atomic_unchecked_t fscache_n_stores_nobufs;
37323 +atomic_unchecked_t fscache_n_stores_oom;
37324 +atomic_unchecked_t fscache_n_store_ops;
37325 +atomic_unchecked_t fscache_n_store_calls;
37326 +atomic_unchecked_t fscache_n_store_pages;
37327 +atomic_unchecked_t fscache_n_store_radix_deletes;
37328 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37329 +
37330 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37331 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37332 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37333 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37334 +
37335 +atomic_unchecked_t fscache_n_marks;
37336 +atomic_unchecked_t fscache_n_uncaches;
37337 +
37338 +atomic_unchecked_t fscache_n_acquires;
37339 +atomic_unchecked_t fscache_n_acquires_null;
37340 +atomic_unchecked_t fscache_n_acquires_no_cache;
37341 +atomic_unchecked_t fscache_n_acquires_ok;
37342 +atomic_unchecked_t fscache_n_acquires_nobufs;
37343 +atomic_unchecked_t fscache_n_acquires_oom;
37344 +
37345 +atomic_unchecked_t fscache_n_updates;
37346 +atomic_unchecked_t fscache_n_updates_null;
37347 +atomic_unchecked_t fscache_n_updates_run;
37348 +
37349 +atomic_unchecked_t fscache_n_relinquishes;
37350 +atomic_unchecked_t fscache_n_relinquishes_null;
37351 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37352 +atomic_unchecked_t fscache_n_relinquishes_retire;
37353 +
37354 +atomic_unchecked_t fscache_n_cookie_index;
37355 +atomic_unchecked_t fscache_n_cookie_data;
37356 +atomic_unchecked_t fscache_n_cookie_special;
37357 +
37358 +atomic_unchecked_t fscache_n_object_alloc;
37359 +atomic_unchecked_t fscache_n_object_no_alloc;
37360 +atomic_unchecked_t fscache_n_object_lookups;
37361 +atomic_unchecked_t fscache_n_object_lookups_negative;
37362 +atomic_unchecked_t fscache_n_object_lookups_positive;
37363 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37364 +atomic_unchecked_t fscache_n_object_created;
37365 +atomic_unchecked_t fscache_n_object_avail;
37366 +atomic_unchecked_t fscache_n_object_dead;
37367 +
37368 +atomic_unchecked_t fscache_n_checkaux_none;
37369 +atomic_unchecked_t fscache_n_checkaux_okay;
37370 +atomic_unchecked_t fscache_n_checkaux_update;
37371 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37372
37373 atomic_t fscache_n_cop_alloc_object;
37374 atomic_t fscache_n_cop_lookup_object;
37375 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37376 seq_puts(m, "FS-Cache statistics\n");
37377
37378 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37379 - atomic_read(&fscache_n_cookie_index),
37380 - atomic_read(&fscache_n_cookie_data),
37381 - atomic_read(&fscache_n_cookie_special));
37382 + atomic_read_unchecked(&fscache_n_cookie_index),
37383 + atomic_read_unchecked(&fscache_n_cookie_data),
37384 + atomic_read_unchecked(&fscache_n_cookie_special));
37385
37386 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37387 - atomic_read(&fscache_n_object_alloc),
37388 - atomic_read(&fscache_n_object_no_alloc),
37389 - atomic_read(&fscache_n_object_avail),
37390 - atomic_read(&fscache_n_object_dead));
37391 + atomic_read_unchecked(&fscache_n_object_alloc),
37392 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37393 + atomic_read_unchecked(&fscache_n_object_avail),
37394 + atomic_read_unchecked(&fscache_n_object_dead));
37395 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37396 - atomic_read(&fscache_n_checkaux_none),
37397 - atomic_read(&fscache_n_checkaux_okay),
37398 - atomic_read(&fscache_n_checkaux_update),
37399 - atomic_read(&fscache_n_checkaux_obsolete));
37400 + atomic_read_unchecked(&fscache_n_checkaux_none),
37401 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37402 + atomic_read_unchecked(&fscache_n_checkaux_update),
37403 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37404
37405 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37406 - atomic_read(&fscache_n_marks),
37407 - atomic_read(&fscache_n_uncaches));
37408 + atomic_read_unchecked(&fscache_n_marks),
37409 + atomic_read_unchecked(&fscache_n_uncaches));
37410
37411 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37412 " oom=%u\n",
37413 - atomic_read(&fscache_n_acquires),
37414 - atomic_read(&fscache_n_acquires_null),
37415 - atomic_read(&fscache_n_acquires_no_cache),
37416 - atomic_read(&fscache_n_acquires_ok),
37417 - atomic_read(&fscache_n_acquires_nobufs),
37418 - atomic_read(&fscache_n_acquires_oom));
37419 + atomic_read_unchecked(&fscache_n_acquires),
37420 + atomic_read_unchecked(&fscache_n_acquires_null),
37421 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37422 + atomic_read_unchecked(&fscache_n_acquires_ok),
37423 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37424 + atomic_read_unchecked(&fscache_n_acquires_oom));
37425
37426 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37427 - atomic_read(&fscache_n_object_lookups),
37428 - atomic_read(&fscache_n_object_lookups_negative),
37429 - atomic_read(&fscache_n_object_lookups_positive),
37430 - atomic_read(&fscache_n_object_created),
37431 - atomic_read(&fscache_n_object_lookups_timed_out));
37432 + atomic_read_unchecked(&fscache_n_object_lookups),
37433 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37434 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37435 + atomic_read_unchecked(&fscache_n_object_created),
37436 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37437
37438 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37439 - atomic_read(&fscache_n_updates),
37440 - atomic_read(&fscache_n_updates_null),
37441 - atomic_read(&fscache_n_updates_run));
37442 + atomic_read_unchecked(&fscache_n_updates),
37443 + atomic_read_unchecked(&fscache_n_updates_null),
37444 + atomic_read_unchecked(&fscache_n_updates_run));
37445
37446 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37447 - atomic_read(&fscache_n_relinquishes),
37448 - atomic_read(&fscache_n_relinquishes_null),
37449 - atomic_read(&fscache_n_relinquishes_waitcrt),
37450 - atomic_read(&fscache_n_relinquishes_retire));
37451 + atomic_read_unchecked(&fscache_n_relinquishes),
37452 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37453 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37454 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37455
37456 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37457 - atomic_read(&fscache_n_attr_changed),
37458 - atomic_read(&fscache_n_attr_changed_ok),
37459 - atomic_read(&fscache_n_attr_changed_nobufs),
37460 - atomic_read(&fscache_n_attr_changed_nomem),
37461 - atomic_read(&fscache_n_attr_changed_calls));
37462 + atomic_read_unchecked(&fscache_n_attr_changed),
37463 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37464 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37465 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37466 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37467
37468 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37469 - atomic_read(&fscache_n_allocs),
37470 - atomic_read(&fscache_n_allocs_ok),
37471 - atomic_read(&fscache_n_allocs_wait),
37472 - atomic_read(&fscache_n_allocs_nobufs),
37473 - atomic_read(&fscache_n_allocs_intr));
37474 + atomic_read_unchecked(&fscache_n_allocs),
37475 + atomic_read_unchecked(&fscache_n_allocs_ok),
37476 + atomic_read_unchecked(&fscache_n_allocs_wait),
37477 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37478 + atomic_read_unchecked(&fscache_n_allocs_intr));
37479 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37480 - atomic_read(&fscache_n_alloc_ops),
37481 - atomic_read(&fscache_n_alloc_op_waits),
37482 - atomic_read(&fscache_n_allocs_object_dead));
37483 + atomic_read_unchecked(&fscache_n_alloc_ops),
37484 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37485 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37486
37487 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37488 " int=%u oom=%u\n",
37489 - atomic_read(&fscache_n_retrievals),
37490 - atomic_read(&fscache_n_retrievals_ok),
37491 - atomic_read(&fscache_n_retrievals_wait),
37492 - atomic_read(&fscache_n_retrievals_nodata),
37493 - atomic_read(&fscache_n_retrievals_nobufs),
37494 - atomic_read(&fscache_n_retrievals_intr),
37495 - atomic_read(&fscache_n_retrievals_nomem));
37496 + atomic_read_unchecked(&fscache_n_retrievals),
37497 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37498 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37499 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37500 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37501 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37502 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37503 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37504 - atomic_read(&fscache_n_retrieval_ops),
37505 - atomic_read(&fscache_n_retrieval_op_waits),
37506 - atomic_read(&fscache_n_retrievals_object_dead));
37507 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37508 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37509 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37510
37511 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37512 - atomic_read(&fscache_n_stores),
37513 - atomic_read(&fscache_n_stores_ok),
37514 - atomic_read(&fscache_n_stores_again),
37515 - atomic_read(&fscache_n_stores_nobufs),
37516 - atomic_read(&fscache_n_stores_oom));
37517 + atomic_read_unchecked(&fscache_n_stores),
37518 + atomic_read_unchecked(&fscache_n_stores_ok),
37519 + atomic_read_unchecked(&fscache_n_stores_again),
37520 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37521 + atomic_read_unchecked(&fscache_n_stores_oom));
37522 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37523 - atomic_read(&fscache_n_store_ops),
37524 - atomic_read(&fscache_n_store_calls),
37525 - atomic_read(&fscache_n_store_pages),
37526 - atomic_read(&fscache_n_store_radix_deletes),
37527 - atomic_read(&fscache_n_store_pages_over_limit));
37528 + atomic_read_unchecked(&fscache_n_store_ops),
37529 + atomic_read_unchecked(&fscache_n_store_calls),
37530 + atomic_read_unchecked(&fscache_n_store_pages),
37531 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37532 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37533
37534 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37535 - atomic_read(&fscache_n_store_vmscan_not_storing),
37536 - atomic_read(&fscache_n_store_vmscan_gone),
37537 - atomic_read(&fscache_n_store_vmscan_busy),
37538 - atomic_read(&fscache_n_store_vmscan_cancelled));
37539 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37540 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37541 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37542 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37543
37544 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37545 - atomic_read(&fscache_n_op_pend),
37546 - atomic_read(&fscache_n_op_run),
37547 - atomic_read(&fscache_n_op_enqueue),
37548 - atomic_read(&fscache_n_op_cancelled),
37549 - atomic_read(&fscache_n_op_rejected));
37550 + atomic_read_unchecked(&fscache_n_op_pend),
37551 + atomic_read_unchecked(&fscache_n_op_run),
37552 + atomic_read_unchecked(&fscache_n_op_enqueue),
37553 + atomic_read_unchecked(&fscache_n_op_cancelled),
37554 + atomic_read_unchecked(&fscache_n_op_rejected));
37555 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37556 - atomic_read(&fscache_n_op_deferred_release),
37557 - atomic_read(&fscache_n_op_release),
37558 - atomic_read(&fscache_n_op_gc));
37559 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37560 + atomic_read_unchecked(&fscache_n_op_release),
37561 + atomic_read_unchecked(&fscache_n_op_gc));
37562
37563 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37564 atomic_read(&fscache_n_cop_alloc_object),
37565 diff -urNp linux-3.0.3/fs/fs_struct.c linux-3.0.3/fs/fs_struct.c
37566 --- linux-3.0.3/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37567 +++ linux-3.0.3/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37568 @@ -4,6 +4,7 @@
37569 #include <linux/path.h>
37570 #include <linux/slab.h>
37571 #include <linux/fs_struct.h>
37572 +#include <linux/grsecurity.h>
37573 #include "internal.h"
37574
37575 static inline void path_get_longterm(struct path *path)
37576 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37577 old_root = fs->root;
37578 fs->root = *path;
37579 path_get_longterm(path);
37580 + gr_set_chroot_entries(current, path);
37581 write_seqcount_end(&fs->seq);
37582 spin_unlock(&fs->lock);
37583 if (old_root.dentry)
37584 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37585 && fs->root.mnt == old_root->mnt) {
37586 path_get_longterm(new_root);
37587 fs->root = *new_root;
37588 + gr_set_chroot_entries(p, new_root);
37589 count++;
37590 }
37591 if (fs->pwd.dentry == old_root->dentry
37592 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37593 spin_lock(&fs->lock);
37594 write_seqcount_begin(&fs->seq);
37595 tsk->fs = NULL;
37596 - kill = !--fs->users;
37597 + gr_clear_chroot_entries(tsk);
37598 + kill = !atomic_dec_return(&fs->users);
37599 write_seqcount_end(&fs->seq);
37600 spin_unlock(&fs->lock);
37601 task_unlock(tsk);
37602 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37603 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37604 /* We don't need to lock fs - think why ;-) */
37605 if (fs) {
37606 - fs->users = 1;
37607 + atomic_set(&fs->users, 1);
37608 fs->in_exec = 0;
37609 spin_lock_init(&fs->lock);
37610 seqcount_init(&fs->seq);
37611 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37612 spin_lock(&old->lock);
37613 fs->root = old->root;
37614 path_get_longterm(&fs->root);
37615 + /* instead of calling gr_set_chroot_entries here,
37616 + we call it from every caller of this function
37617 + */
37618 fs->pwd = old->pwd;
37619 path_get_longterm(&fs->pwd);
37620 spin_unlock(&old->lock);
37621 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37622
37623 task_lock(current);
37624 spin_lock(&fs->lock);
37625 - kill = !--fs->users;
37626 + kill = !atomic_dec_return(&fs->users);
37627 current->fs = new_fs;
37628 + gr_set_chroot_entries(current, &new_fs->root);
37629 spin_unlock(&fs->lock);
37630 task_unlock(current);
37631
37632 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37633
37634 /* to be mentioned only in INIT_TASK */
37635 struct fs_struct init_fs = {
37636 - .users = 1,
37637 + .users = ATOMIC_INIT(1),
37638 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37639 .seq = SEQCNT_ZERO,
37640 .umask = 0022,
37641 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37642 task_lock(current);
37643
37644 spin_lock(&init_fs.lock);
37645 - init_fs.users++;
37646 + atomic_inc(&init_fs.users);
37647 spin_unlock(&init_fs.lock);
37648
37649 spin_lock(&fs->lock);
37650 current->fs = &init_fs;
37651 - kill = !--fs->users;
37652 + gr_set_chroot_entries(current, &current->fs->root);
37653 + kill = !atomic_dec_return(&fs->users);
37654 spin_unlock(&fs->lock);
37655
37656 task_unlock(current);
37657 diff -urNp linux-3.0.3/fs/fuse/cuse.c linux-3.0.3/fs/fuse/cuse.c
37658 --- linux-3.0.3/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37659 +++ linux-3.0.3/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37660 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
37661 INIT_LIST_HEAD(&cuse_conntbl[i]);
37662
37663 /* inherit and extend fuse_dev_operations */
37664 - cuse_channel_fops = fuse_dev_operations;
37665 - cuse_channel_fops.owner = THIS_MODULE;
37666 - cuse_channel_fops.open = cuse_channel_open;
37667 - cuse_channel_fops.release = cuse_channel_release;
37668 + pax_open_kernel();
37669 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37670 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37671 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
37672 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
37673 + pax_close_kernel();
37674
37675 cuse_class = class_create(THIS_MODULE, "cuse");
37676 if (IS_ERR(cuse_class))
37677 diff -urNp linux-3.0.3/fs/fuse/dev.c linux-3.0.3/fs/fuse/dev.c
37678 --- linux-3.0.3/fs/fuse/dev.c 2011-07-21 22:17:23.000000000 -0400
37679 +++ linux-3.0.3/fs/fuse/dev.c 2011-08-23 21:47:56.000000000 -0400
37680 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37681 ret = 0;
37682 pipe_lock(pipe);
37683
37684 - if (!pipe->readers) {
37685 + if (!atomic_read(&pipe->readers)) {
37686 send_sig(SIGPIPE, current, 0);
37687 if (!ret)
37688 ret = -EPIPE;
37689 diff -urNp linux-3.0.3/fs/fuse/dir.c linux-3.0.3/fs/fuse/dir.c
37690 --- linux-3.0.3/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
37691 +++ linux-3.0.3/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
37692 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
37693 return link;
37694 }
37695
37696 -static void free_link(char *link)
37697 +static void free_link(const char *link)
37698 {
37699 if (!IS_ERR(link))
37700 free_page((unsigned long) link);
37701 diff -urNp linux-3.0.3/fs/gfs2/inode.c linux-3.0.3/fs/gfs2/inode.c
37702 --- linux-3.0.3/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
37703 +++ linux-3.0.3/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
37704 @@ -1525,7 +1525,7 @@ out:
37705
37706 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37707 {
37708 - char *s = nd_get_link(nd);
37709 + const char *s = nd_get_link(nd);
37710 if (!IS_ERR(s))
37711 kfree(s);
37712 }
37713 diff -urNp linux-3.0.3/fs/hfsplus/catalog.c linux-3.0.3/fs/hfsplus/catalog.c
37714 --- linux-3.0.3/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
37715 +++ linux-3.0.3/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
37716 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
37717 int err;
37718 u16 type;
37719
37720 + pax_track_stack();
37721 +
37722 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
37723 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
37724 if (err)
37725 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
37726 int entry_size;
37727 int err;
37728
37729 + pax_track_stack();
37730 +
37731 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
37732 str->name, cnid, inode->i_nlink);
37733 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
37734 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
37735 int entry_size, type;
37736 int err = 0;
37737
37738 + pax_track_stack();
37739 +
37740 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
37741 cnid, src_dir->i_ino, src_name->name,
37742 dst_dir->i_ino, dst_name->name);
37743 diff -urNp linux-3.0.3/fs/hfsplus/dir.c linux-3.0.3/fs/hfsplus/dir.c
37744 --- linux-3.0.3/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
37745 +++ linux-3.0.3/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
37746 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
37747 struct hfsplus_readdir_data *rd;
37748 u16 type;
37749
37750 + pax_track_stack();
37751 +
37752 if (filp->f_pos >= inode->i_size)
37753 return 0;
37754
37755 diff -urNp linux-3.0.3/fs/hfsplus/inode.c linux-3.0.3/fs/hfsplus/inode.c
37756 --- linux-3.0.3/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
37757 +++ linux-3.0.3/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
37758 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
37759 int res = 0;
37760 u16 type;
37761
37762 + pax_track_stack();
37763 +
37764 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
37765
37766 HFSPLUS_I(inode)->linkid = 0;
37767 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
37768 struct hfs_find_data fd;
37769 hfsplus_cat_entry entry;
37770
37771 + pax_track_stack();
37772 +
37773 if (HFSPLUS_IS_RSRC(inode))
37774 main_inode = HFSPLUS_I(inode)->rsrc_inode;
37775
37776 diff -urNp linux-3.0.3/fs/hfsplus/ioctl.c linux-3.0.3/fs/hfsplus/ioctl.c
37777 --- linux-3.0.3/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
37778 +++ linux-3.0.3/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
37779 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
37780 struct hfsplus_cat_file *file;
37781 int res;
37782
37783 + pax_track_stack();
37784 +
37785 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
37786 return -EOPNOTSUPP;
37787
37788 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
37789 struct hfsplus_cat_file *file;
37790 ssize_t res = 0;
37791
37792 + pax_track_stack();
37793 +
37794 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
37795 return -EOPNOTSUPP;
37796
37797 diff -urNp linux-3.0.3/fs/hfsplus/super.c linux-3.0.3/fs/hfsplus/super.c
37798 --- linux-3.0.3/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
37799 +++ linux-3.0.3/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
37800 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
37801 struct nls_table *nls = NULL;
37802 int err;
37803
37804 + pax_track_stack();
37805 +
37806 err = -EINVAL;
37807 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
37808 if (!sbi)
37809 diff -urNp linux-3.0.3/fs/hugetlbfs/inode.c linux-3.0.3/fs/hugetlbfs/inode.c
37810 --- linux-3.0.3/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
37811 +++ linux-3.0.3/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
37812 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
37813 .kill_sb = kill_litter_super,
37814 };
37815
37816 -static struct vfsmount *hugetlbfs_vfsmount;
37817 +struct vfsmount *hugetlbfs_vfsmount;
37818
37819 static int can_do_hugetlb_shm(void)
37820 {
37821 diff -urNp linux-3.0.3/fs/inode.c linux-3.0.3/fs/inode.c
37822 --- linux-3.0.3/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
37823 +++ linux-3.0.3/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
37824 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
37825
37826 #ifdef CONFIG_SMP
37827 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
37828 - static atomic_t shared_last_ino;
37829 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
37830 + static atomic_unchecked_t shared_last_ino;
37831 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
37832
37833 res = next - LAST_INO_BATCH;
37834 }
37835 diff -urNp linux-3.0.3/fs/jbd/checkpoint.c linux-3.0.3/fs/jbd/checkpoint.c
37836 --- linux-3.0.3/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
37837 +++ linux-3.0.3/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
37838 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
37839 tid_t this_tid;
37840 int result;
37841
37842 + pax_track_stack();
37843 +
37844 jbd_debug(1, "Start checkpoint\n");
37845
37846 /*
37847 diff -urNp linux-3.0.3/fs/jffs2/compr_rtime.c linux-3.0.3/fs/jffs2/compr_rtime.c
37848 --- linux-3.0.3/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
37849 +++ linux-3.0.3/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
37850 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
37851 int outpos = 0;
37852 int pos=0;
37853
37854 + pax_track_stack();
37855 +
37856 memset(positions,0,sizeof(positions));
37857
37858 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
37859 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
37860 int outpos = 0;
37861 int pos=0;
37862
37863 + pax_track_stack();
37864 +
37865 memset(positions,0,sizeof(positions));
37866
37867 while (outpos<destlen) {
37868 diff -urNp linux-3.0.3/fs/jffs2/compr_rubin.c linux-3.0.3/fs/jffs2/compr_rubin.c
37869 --- linux-3.0.3/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
37870 +++ linux-3.0.3/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
37871 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
37872 int ret;
37873 uint32_t mysrclen, mydstlen;
37874
37875 + pax_track_stack();
37876 +
37877 mysrclen = *sourcelen;
37878 mydstlen = *dstlen - 8;
37879
37880 diff -urNp linux-3.0.3/fs/jffs2/erase.c linux-3.0.3/fs/jffs2/erase.c
37881 --- linux-3.0.3/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
37882 +++ linux-3.0.3/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
37883 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
37884 struct jffs2_unknown_node marker = {
37885 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
37886 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
37887 - .totlen = cpu_to_je32(c->cleanmarker_size)
37888 + .totlen = cpu_to_je32(c->cleanmarker_size),
37889 + .hdr_crc = cpu_to_je32(0)
37890 };
37891
37892 jffs2_prealloc_raw_node_refs(c, jeb, 1);
37893 diff -urNp linux-3.0.3/fs/jffs2/wbuf.c linux-3.0.3/fs/jffs2/wbuf.c
37894 --- linux-3.0.3/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
37895 +++ linux-3.0.3/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
37896 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
37897 {
37898 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
37899 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
37900 - .totlen = constant_cpu_to_je32(8)
37901 + .totlen = constant_cpu_to_je32(8),
37902 + .hdr_crc = constant_cpu_to_je32(0)
37903 };
37904
37905 /*
37906 diff -urNp linux-3.0.3/fs/jffs2/xattr.c linux-3.0.3/fs/jffs2/xattr.c
37907 --- linux-3.0.3/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
37908 +++ linux-3.0.3/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
37909 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
37910
37911 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
37912
37913 + pax_track_stack();
37914 +
37915 /* Phase.1 : Merge same xref */
37916 for (i=0; i < XREF_TMPHASH_SIZE; i++)
37917 xref_tmphash[i] = NULL;
37918 diff -urNp linux-3.0.3/fs/jfs/super.c linux-3.0.3/fs/jfs/super.c
37919 --- linux-3.0.3/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
37920 +++ linux-3.0.3/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
37921 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
37922
37923 jfs_inode_cachep =
37924 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
37925 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
37926 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
37927 init_once);
37928 if (jfs_inode_cachep == NULL)
37929 return -ENOMEM;
37930 diff -urNp linux-3.0.3/fs/Kconfig.binfmt linux-3.0.3/fs/Kconfig.binfmt
37931 --- linux-3.0.3/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
37932 +++ linux-3.0.3/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
37933 @@ -86,7 +86,7 @@ config HAVE_AOUT
37934
37935 config BINFMT_AOUT
37936 tristate "Kernel support for a.out and ECOFF binaries"
37937 - depends on HAVE_AOUT
37938 + depends on HAVE_AOUT && BROKEN
37939 ---help---
37940 A.out (Assembler.OUTput) is a set of formats for libraries and
37941 executables used in the earliest versions of UNIX. Linux used
37942 diff -urNp linux-3.0.3/fs/libfs.c linux-3.0.3/fs/libfs.c
37943 --- linux-3.0.3/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
37944 +++ linux-3.0.3/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
37945 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
37946
37947 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
37948 struct dentry *next;
37949 + char d_name[sizeof(next->d_iname)];
37950 + const unsigned char *name;
37951 +
37952 next = list_entry(p, struct dentry, d_u.d_child);
37953 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
37954 if (!simple_positive(next)) {
37955 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
37956
37957 spin_unlock(&next->d_lock);
37958 spin_unlock(&dentry->d_lock);
37959 - if (filldir(dirent, next->d_name.name,
37960 + name = next->d_name.name;
37961 + if (name == next->d_iname) {
37962 + memcpy(d_name, name, next->d_name.len);
37963 + name = d_name;
37964 + }
37965 + if (filldir(dirent, name,
37966 next->d_name.len, filp->f_pos,
37967 next->d_inode->i_ino,
37968 dt_type(next->d_inode)) < 0)
37969 diff -urNp linux-3.0.3/fs/lockd/clntproc.c linux-3.0.3/fs/lockd/clntproc.c
37970 --- linux-3.0.3/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
37971 +++ linux-3.0.3/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
37972 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
37973 /*
37974 * Cookie counter for NLM requests
37975 */
37976 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
37977 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
37978
37979 void nlmclnt_next_cookie(struct nlm_cookie *c)
37980 {
37981 - u32 cookie = atomic_inc_return(&nlm_cookie);
37982 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
37983
37984 memcpy(c->data, &cookie, 4);
37985 c->len=4;
37986 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
37987 struct nlm_rqst reqst, *req;
37988 int status;
37989
37990 + pax_track_stack();
37991 +
37992 req = &reqst;
37993 memset(req, 0, sizeof(*req));
37994 locks_init_lock(&req->a_args.lock.fl);
37995 diff -urNp linux-3.0.3/fs/locks.c linux-3.0.3/fs/locks.c
37996 --- linux-3.0.3/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
37997 +++ linux-3.0.3/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
37998 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
37999 return;
38000
38001 if (filp->f_op && filp->f_op->flock) {
38002 - struct file_lock fl = {
38003 + struct file_lock flock = {
38004 .fl_pid = current->tgid,
38005 .fl_file = filp,
38006 .fl_flags = FL_FLOCK,
38007 .fl_type = F_UNLCK,
38008 .fl_end = OFFSET_MAX,
38009 };
38010 - filp->f_op->flock(filp, F_SETLKW, &fl);
38011 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38012 - fl.fl_ops->fl_release_private(&fl);
38013 + filp->f_op->flock(filp, F_SETLKW, &flock);
38014 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38015 + flock.fl_ops->fl_release_private(&flock);
38016 }
38017
38018 lock_flocks();
38019 diff -urNp linux-3.0.3/fs/logfs/super.c linux-3.0.3/fs/logfs/super.c
38020 --- linux-3.0.3/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38021 +++ linux-3.0.3/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38022 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38023 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38024 int err, valid0, valid1;
38025
38026 + pax_track_stack();
38027 +
38028 /* read first superblock */
38029 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38030 if (err)
38031 diff -urNp linux-3.0.3/fs/namei.c linux-3.0.3/fs/namei.c
38032 --- linux-3.0.3/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38033 +++ linux-3.0.3/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38034 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38035 return ret;
38036
38037 /*
38038 - * Read/write DACs are always overridable.
38039 - * Executable DACs are overridable for all directories and
38040 - * for non-directories that have least one exec bit set.
38041 + * Searching includes executable on directories, else just read.
38042 */
38043 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38044 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38045 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38046 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38047 +#ifdef CONFIG_GRKERNSEC
38048 + if (flags & IPERM_FLAG_RCU)
38049 + return -ECHILD;
38050 +#endif
38051 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38052 return 0;
38053 + }
38054
38055 /*
38056 - * Searching includes executable on directories, else just read.
38057 + * Read/write DACs are always overridable.
38058 + * Executable DACs are overridable for all directories and
38059 + * for non-directories that have least one exec bit set.
38060 */
38061 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38062 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38063 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38064 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38065 +#ifdef CONFIG_GRKERNSEC
38066 + if (flags & IPERM_FLAG_RCU)
38067 + return -ECHILD;
38068 +#endif
38069 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38070 return 0;
38071 + }
38072
38073 return -EACCES;
38074 }
38075 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38076 br_read_unlock(vfsmount_lock);
38077 }
38078
38079 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38080 + return -ENOENT;
38081 +
38082 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38083 return 0;
38084
38085 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
38086 if (ret == -ECHILD)
38087 return ret;
38088
38089 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38090 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38091 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38092 goto ok;
38093 + else {
38094 +#ifdef CONFIG_GRKERNSEC
38095 + if (flags & IPERM_FLAG_RCU)
38096 + return -ECHILD;
38097 +#endif
38098 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38099 + goto ok;
38100 + }
38101
38102 return ret;
38103 ok:
38104 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38105 return error;
38106 }
38107
38108 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38109 + dentry->d_inode, dentry, nd->path.mnt)) {
38110 + error = -EACCES;
38111 + *p = ERR_PTR(error); /* no ->put_link(), please */
38112 + path_put(&nd->path);
38113 + return error;
38114 + }
38115 +
38116 nd->last_type = LAST_BIND;
38117 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38118 error = PTR_ERR(*p);
38119 if (!IS_ERR(*p)) {
38120 - char *s = nd_get_link(nd);
38121 + const char *s = nd_get_link(nd);
38122 error = 0;
38123 if (s)
38124 error = __vfs_follow_link(nd, s);
38125 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38126 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38127
38128 if (likely(!retval)) {
38129 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38130 + return -ENOENT;
38131 +
38132 if (unlikely(!audit_dummy_context())) {
38133 if (nd->path.dentry && nd->inode)
38134 audit_inode(name, nd->path.dentry);
38135 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38136 return error;
38137 }
38138
38139 +/*
38140 + * Note that while the flag value (low two bits) for sys_open means:
38141 + * 00 - read-only
38142 + * 01 - write-only
38143 + * 10 - read-write
38144 + * 11 - special
38145 + * it is changed into
38146 + * 00 - no permissions needed
38147 + * 01 - read-permission
38148 + * 10 - write-permission
38149 + * 11 - read-write
38150 + * for the internal routines (ie open_namei()/follow_link() etc)
38151 + * This is more logical, and also allows the 00 "no perm needed"
38152 + * to be used for symlinks (where the permissions are checked
38153 + * later).
38154 + *
38155 +*/
38156 +static inline int open_to_namei_flags(int flag)
38157 +{
38158 + if ((flag+1) & O_ACCMODE)
38159 + flag++;
38160 + return flag;
38161 +}
38162 +
38163 static int may_open(struct path *path, int acc_mode, int flag)
38164 {
38165 struct dentry *dentry = path->dentry;
38166 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38167 /*
38168 * Ensure there are no outstanding leases on the file.
38169 */
38170 - return break_lease(inode, flag);
38171 + error = break_lease(inode, flag);
38172 +
38173 + if (error)
38174 + return error;
38175 +
38176 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38177 + error = -EPERM;
38178 + goto exit;
38179 + }
38180 +
38181 + if (gr_handle_rawio(inode)) {
38182 + error = -EPERM;
38183 + goto exit;
38184 + }
38185 +
38186 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38187 + error = -EACCES;
38188 + goto exit;
38189 + }
38190 +exit:
38191 + return error;
38192 }
38193
38194 static int handle_truncate(struct file *filp)
38195 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38196 }
38197
38198 /*
38199 - * Note that while the flag value (low two bits) for sys_open means:
38200 - * 00 - read-only
38201 - * 01 - write-only
38202 - * 10 - read-write
38203 - * 11 - special
38204 - * it is changed into
38205 - * 00 - no permissions needed
38206 - * 01 - read-permission
38207 - * 10 - write-permission
38208 - * 11 - read-write
38209 - * for the internal routines (ie open_namei()/follow_link() etc)
38210 - * This is more logical, and also allows the 00 "no perm needed"
38211 - * to be used for symlinks (where the permissions are checked
38212 - * later).
38213 - *
38214 -*/
38215 -static inline int open_to_namei_flags(int flag)
38216 -{
38217 - if ((flag+1) & O_ACCMODE)
38218 - flag++;
38219 - return flag;
38220 -}
38221 -
38222 -/*
38223 * Handle the last step of open()
38224 */
38225 static struct file *do_last(struct nameidata *nd, struct path *path,
38226 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38227 struct dentry *dir = nd->path.dentry;
38228 struct dentry *dentry;
38229 int open_flag = op->open_flag;
38230 + int flag = open_to_namei_flags(open_flag);
38231 int will_truncate = open_flag & O_TRUNC;
38232 int want_write = 0;
38233 int acc_mode = op->acc_mode;
38234 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38235 /* Negative dentry, just create the file */
38236 if (!dentry->d_inode) {
38237 int mode = op->mode;
38238 +
38239 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38240 + error = -EACCES;
38241 + goto exit_mutex_unlock;
38242 + }
38243 +
38244 if (!IS_POSIXACL(dir->d_inode))
38245 mode &= ~current_umask();
38246 /*
38247 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38248 error = vfs_create(dir->d_inode, dentry, mode, nd);
38249 if (error)
38250 goto exit_mutex_unlock;
38251 + else
38252 + gr_handle_create(path->dentry, path->mnt);
38253 mutex_unlock(&dir->d_inode->i_mutex);
38254 dput(nd->path.dentry);
38255 nd->path.dentry = dentry;
38256 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38257 /*
38258 * It already exists.
38259 */
38260 +
38261 + /* only check if O_CREAT is specified, all other checks need to go
38262 + into may_open */
38263 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38264 + error = -EACCES;
38265 + goto exit_mutex_unlock;
38266 + }
38267 +
38268 mutex_unlock(&dir->d_inode->i_mutex);
38269 audit_inode(pathname, path->dentry);
38270
38271 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38272 error = may_mknod(mode);
38273 if (error)
38274 goto out_dput;
38275 +
38276 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38277 + error = -EPERM;
38278 + goto out_dput;
38279 + }
38280 +
38281 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38282 + error = -EACCES;
38283 + goto out_dput;
38284 + }
38285 +
38286 error = mnt_want_write(nd.path.mnt);
38287 if (error)
38288 goto out_dput;
38289 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38290 }
38291 out_drop_write:
38292 mnt_drop_write(nd.path.mnt);
38293 +
38294 + if (!error)
38295 + gr_handle_create(dentry, nd.path.mnt);
38296 out_dput:
38297 dput(dentry);
38298 out_unlock:
38299 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38300 if (IS_ERR(dentry))
38301 goto out_unlock;
38302
38303 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38304 + error = -EACCES;
38305 + goto out_dput;
38306 + }
38307 +
38308 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38309 mode &= ~current_umask();
38310 error = mnt_want_write(nd.path.mnt);
38311 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38312 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38313 out_drop_write:
38314 mnt_drop_write(nd.path.mnt);
38315 +
38316 + if (!error)
38317 + gr_handle_create(dentry, nd.path.mnt);
38318 +
38319 out_dput:
38320 dput(dentry);
38321 out_unlock:
38322 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38323 char * name;
38324 struct dentry *dentry;
38325 struct nameidata nd;
38326 + ino_t saved_ino = 0;
38327 + dev_t saved_dev = 0;
38328
38329 error = user_path_parent(dfd, pathname, &nd, &name);
38330 if (error)
38331 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38332 error = -ENOENT;
38333 goto exit3;
38334 }
38335 +
38336 + if (dentry->d_inode->i_nlink <= 1) {
38337 + saved_ino = dentry->d_inode->i_ino;
38338 + saved_dev = gr_get_dev_from_dentry(dentry);
38339 + }
38340 +
38341 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38342 + error = -EACCES;
38343 + goto exit3;
38344 + }
38345 +
38346 error = mnt_want_write(nd.path.mnt);
38347 if (error)
38348 goto exit3;
38349 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38350 if (error)
38351 goto exit4;
38352 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38353 + if (!error && (saved_dev || saved_ino))
38354 + gr_handle_delete(saved_ino, saved_dev);
38355 exit4:
38356 mnt_drop_write(nd.path.mnt);
38357 exit3:
38358 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38359 struct dentry *dentry;
38360 struct nameidata nd;
38361 struct inode *inode = NULL;
38362 + ino_t saved_ino = 0;
38363 + dev_t saved_dev = 0;
38364
38365 error = user_path_parent(dfd, pathname, &nd, &name);
38366 if (error)
38367 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38368 if (!inode)
38369 goto slashes;
38370 ihold(inode);
38371 +
38372 + if (inode->i_nlink <= 1) {
38373 + saved_ino = inode->i_ino;
38374 + saved_dev = gr_get_dev_from_dentry(dentry);
38375 + }
38376 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38377 + error = -EACCES;
38378 + goto exit2;
38379 + }
38380 +
38381 error = mnt_want_write(nd.path.mnt);
38382 if (error)
38383 goto exit2;
38384 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38385 if (error)
38386 goto exit3;
38387 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38388 + if (!error && (saved_ino || saved_dev))
38389 + gr_handle_delete(saved_ino, saved_dev);
38390 exit3:
38391 mnt_drop_write(nd.path.mnt);
38392 exit2:
38393 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38394 if (IS_ERR(dentry))
38395 goto out_unlock;
38396
38397 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38398 + error = -EACCES;
38399 + goto out_dput;
38400 + }
38401 +
38402 error = mnt_want_write(nd.path.mnt);
38403 if (error)
38404 goto out_dput;
38405 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38406 if (error)
38407 goto out_drop_write;
38408 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38409 + if (!error)
38410 + gr_handle_create(dentry, nd.path.mnt);
38411 out_drop_write:
38412 mnt_drop_write(nd.path.mnt);
38413 out_dput:
38414 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38415 error = PTR_ERR(new_dentry);
38416 if (IS_ERR(new_dentry))
38417 goto out_unlock;
38418 +
38419 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38420 + old_path.dentry->d_inode,
38421 + old_path.dentry->d_inode->i_mode, to)) {
38422 + error = -EACCES;
38423 + goto out_dput;
38424 + }
38425 +
38426 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38427 + old_path.dentry, old_path.mnt, to)) {
38428 + error = -EACCES;
38429 + goto out_dput;
38430 + }
38431 +
38432 error = mnt_want_write(nd.path.mnt);
38433 if (error)
38434 goto out_dput;
38435 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38436 if (error)
38437 goto out_drop_write;
38438 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38439 + if (!error)
38440 + gr_handle_create(new_dentry, nd.path.mnt);
38441 out_drop_write:
38442 mnt_drop_write(nd.path.mnt);
38443 out_dput:
38444 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38445 char *to;
38446 int error;
38447
38448 + pax_track_stack();
38449 +
38450 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38451 if (error)
38452 goto exit;
38453 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38454 if (new_dentry == trap)
38455 goto exit5;
38456
38457 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38458 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38459 + to);
38460 + if (error)
38461 + goto exit5;
38462 +
38463 error = mnt_want_write(oldnd.path.mnt);
38464 if (error)
38465 goto exit5;
38466 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38467 goto exit6;
38468 error = vfs_rename(old_dir->d_inode, old_dentry,
38469 new_dir->d_inode, new_dentry);
38470 + if (!error)
38471 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38472 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38473 exit6:
38474 mnt_drop_write(oldnd.path.mnt);
38475 exit5:
38476 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38477
38478 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38479 {
38480 + char tmpbuf[64];
38481 + const char *newlink;
38482 int len;
38483
38484 len = PTR_ERR(link);
38485 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38486 len = strlen(link);
38487 if (len > (unsigned) buflen)
38488 len = buflen;
38489 - if (copy_to_user(buffer, link, len))
38490 +
38491 + if (len < sizeof(tmpbuf)) {
38492 + memcpy(tmpbuf, link, len);
38493 + newlink = tmpbuf;
38494 + } else
38495 + newlink = link;
38496 +
38497 + if (copy_to_user(buffer, newlink, len))
38498 len = -EFAULT;
38499 out:
38500 return len;
38501 diff -urNp linux-3.0.3/fs/namespace.c linux-3.0.3/fs/namespace.c
38502 --- linux-3.0.3/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38503 +++ linux-3.0.3/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38504 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38505 if (!(sb->s_flags & MS_RDONLY))
38506 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38507 up_write(&sb->s_umount);
38508 +
38509 + gr_log_remount(mnt->mnt_devname, retval);
38510 +
38511 return retval;
38512 }
38513
38514 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38515 br_write_unlock(vfsmount_lock);
38516 up_write(&namespace_sem);
38517 release_mounts(&umount_list);
38518 +
38519 + gr_log_unmount(mnt->mnt_devname, retval);
38520 +
38521 return retval;
38522 }
38523
38524 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38525 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38526 MS_STRICTATIME);
38527
38528 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38529 + retval = -EPERM;
38530 + goto dput_out;
38531 + }
38532 +
38533 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38534 + retval = -EPERM;
38535 + goto dput_out;
38536 + }
38537 +
38538 if (flags & MS_REMOUNT)
38539 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38540 data_page);
38541 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38542 dev_name, data_page);
38543 dput_out:
38544 path_put(&path);
38545 +
38546 + gr_log_mount(dev_name, dir_name, retval);
38547 +
38548 return retval;
38549 }
38550
38551 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38552 if (error)
38553 goto out2;
38554
38555 + if (gr_handle_chroot_pivot()) {
38556 + error = -EPERM;
38557 + goto out2;
38558 + }
38559 +
38560 get_fs_root(current->fs, &root);
38561 error = lock_mount(&old);
38562 if (error)
38563 diff -urNp linux-3.0.3/fs/ncpfs/dir.c linux-3.0.3/fs/ncpfs/dir.c
38564 --- linux-3.0.3/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38565 +++ linux-3.0.3/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38566 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38567 int res, val = 0, len;
38568 __u8 __name[NCP_MAXPATHLEN + 1];
38569
38570 + pax_track_stack();
38571 +
38572 if (dentry == dentry->d_sb->s_root)
38573 return 1;
38574
38575 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38576 int error, res, len;
38577 __u8 __name[NCP_MAXPATHLEN + 1];
38578
38579 + pax_track_stack();
38580 +
38581 error = -EIO;
38582 if (!ncp_conn_valid(server))
38583 goto finished;
38584 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38585 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38586 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38587
38588 + pax_track_stack();
38589 +
38590 ncp_age_dentry(server, dentry);
38591 len = sizeof(__name);
38592 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38593 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38594 int error, len;
38595 __u8 __name[NCP_MAXPATHLEN + 1];
38596
38597 + pax_track_stack();
38598 +
38599 DPRINTK("ncp_mkdir: making %s/%s\n",
38600 dentry->d_parent->d_name.name, dentry->d_name.name);
38601
38602 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38603 int old_len, new_len;
38604 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38605
38606 + pax_track_stack();
38607 +
38608 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38609 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38610 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38611 diff -urNp linux-3.0.3/fs/ncpfs/inode.c linux-3.0.3/fs/ncpfs/inode.c
38612 --- linux-3.0.3/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38613 +++ linux-3.0.3/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38614 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38615 #endif
38616 struct ncp_entry_info finfo;
38617
38618 + pax_track_stack();
38619 +
38620 memset(&data, 0, sizeof(data));
38621 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38622 if (!server)
38623 diff -urNp linux-3.0.3/fs/nfs/inode.c linux-3.0.3/fs/nfs/inode.c
38624 --- linux-3.0.3/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38625 +++ linux-3.0.3/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38626 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38627 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38628 nfsi->attrtimeo_timestamp = jiffies;
38629
38630 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38631 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38632 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38633 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38634 else
38635 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38636 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38637 }
38638
38639 -static atomic_long_t nfs_attr_generation_counter;
38640 +static atomic_long_unchecked_t nfs_attr_generation_counter;
38641
38642 static unsigned long nfs_read_attr_generation_counter(void)
38643 {
38644 - return atomic_long_read(&nfs_attr_generation_counter);
38645 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38646 }
38647
38648 unsigned long nfs_inc_attr_generation_counter(void)
38649 {
38650 - return atomic_long_inc_return(&nfs_attr_generation_counter);
38651 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38652 }
38653
38654 void nfs_fattr_init(struct nfs_fattr *fattr)
38655 diff -urNp linux-3.0.3/fs/nfsd/nfs4state.c linux-3.0.3/fs/nfsd/nfs4state.c
38656 --- linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38657 +++ linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38658 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38659 unsigned int strhashval;
38660 int err;
38661
38662 + pax_track_stack();
38663 +
38664 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38665 (long long) lock->lk_offset,
38666 (long long) lock->lk_length);
38667 diff -urNp linux-3.0.3/fs/nfsd/nfs4xdr.c linux-3.0.3/fs/nfsd/nfs4xdr.c
38668 --- linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38669 +++ linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38670 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38671 .dentry = dentry,
38672 };
38673
38674 + pax_track_stack();
38675 +
38676 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38677 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38678 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38679 diff -urNp linux-3.0.3/fs/nfsd/vfs.c linux-3.0.3/fs/nfsd/vfs.c
38680 --- linux-3.0.3/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
38681 +++ linux-3.0.3/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
38682 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38683 } else {
38684 oldfs = get_fs();
38685 set_fs(KERNEL_DS);
38686 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38687 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38688 set_fs(oldfs);
38689 }
38690
38691 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
38692
38693 /* Write the data. */
38694 oldfs = get_fs(); set_fs(KERNEL_DS);
38695 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
38696 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
38697 set_fs(oldfs);
38698 if (host_err < 0)
38699 goto out_nfserr;
38700 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
38701 */
38702
38703 oldfs = get_fs(); set_fs(KERNEL_DS);
38704 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
38705 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
38706 set_fs(oldfs);
38707
38708 if (host_err < 0)
38709 diff -urNp linux-3.0.3/fs/notify/fanotify/fanotify_user.c linux-3.0.3/fs/notify/fanotify/fanotify_user.c
38710 --- linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
38711 +++ linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
38712 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
38713 goto out_close_fd;
38714
38715 ret = -EFAULT;
38716 - if (copy_to_user(buf, &fanotify_event_metadata,
38717 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
38718 + copy_to_user(buf, &fanotify_event_metadata,
38719 fanotify_event_metadata.event_len))
38720 goto out_kill_access_response;
38721
38722 diff -urNp linux-3.0.3/fs/notify/notification.c linux-3.0.3/fs/notify/notification.c
38723 --- linux-3.0.3/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
38724 +++ linux-3.0.3/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
38725 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
38726 * get set to 0 so it will never get 'freed'
38727 */
38728 static struct fsnotify_event *q_overflow_event;
38729 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38730 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38731
38732 /**
38733 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
38734 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
38735 */
38736 u32 fsnotify_get_cookie(void)
38737 {
38738 - return atomic_inc_return(&fsnotify_sync_cookie);
38739 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
38740 }
38741 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
38742
38743 diff -urNp linux-3.0.3/fs/ntfs/dir.c linux-3.0.3/fs/ntfs/dir.c
38744 --- linux-3.0.3/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38745 +++ linux-3.0.3/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38746 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
38747 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
38748 ~(s64)(ndir->itype.index.block_size - 1)));
38749 /* Bounds checks. */
38750 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
38751 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
38752 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
38753 "inode 0x%lx or driver bug.", vdir->i_ino);
38754 goto err_out;
38755 diff -urNp linux-3.0.3/fs/ntfs/file.c linux-3.0.3/fs/ntfs/file.c
38756 --- linux-3.0.3/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
38757 +++ linux-3.0.3/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
38758 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
38759 #endif /* NTFS_RW */
38760 };
38761
38762 -const struct file_operations ntfs_empty_file_ops = {};
38763 +const struct file_operations ntfs_empty_file_ops __read_only;
38764
38765 -const struct inode_operations ntfs_empty_inode_ops = {};
38766 +const struct inode_operations ntfs_empty_inode_ops __read_only;
38767 diff -urNp linux-3.0.3/fs/ocfs2/localalloc.c linux-3.0.3/fs/ocfs2/localalloc.c
38768 --- linux-3.0.3/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
38769 +++ linux-3.0.3/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
38770 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
38771 goto bail;
38772 }
38773
38774 - atomic_inc(&osb->alloc_stats.moves);
38775 + atomic_inc_unchecked(&osb->alloc_stats.moves);
38776
38777 bail:
38778 if (handle)
38779 diff -urNp linux-3.0.3/fs/ocfs2/namei.c linux-3.0.3/fs/ocfs2/namei.c
38780 --- linux-3.0.3/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
38781 +++ linux-3.0.3/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
38782 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
38783 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
38784 struct ocfs2_dir_lookup_result target_insert = { NULL, };
38785
38786 + pax_track_stack();
38787 +
38788 /* At some point it might be nice to break this function up a
38789 * bit. */
38790
38791 diff -urNp linux-3.0.3/fs/ocfs2/ocfs2.h linux-3.0.3/fs/ocfs2/ocfs2.h
38792 --- linux-3.0.3/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
38793 +++ linux-3.0.3/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
38794 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
38795
38796 struct ocfs2_alloc_stats
38797 {
38798 - atomic_t moves;
38799 - atomic_t local_data;
38800 - atomic_t bitmap_data;
38801 - atomic_t bg_allocs;
38802 - atomic_t bg_extends;
38803 + atomic_unchecked_t moves;
38804 + atomic_unchecked_t local_data;
38805 + atomic_unchecked_t bitmap_data;
38806 + atomic_unchecked_t bg_allocs;
38807 + atomic_unchecked_t bg_extends;
38808 };
38809
38810 enum ocfs2_local_alloc_state
38811 diff -urNp linux-3.0.3/fs/ocfs2/suballoc.c linux-3.0.3/fs/ocfs2/suballoc.c
38812 --- linux-3.0.3/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
38813 +++ linux-3.0.3/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
38814 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
38815 mlog_errno(status);
38816 goto bail;
38817 }
38818 - atomic_inc(&osb->alloc_stats.bg_extends);
38819 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
38820
38821 /* You should never ask for this much metadata */
38822 BUG_ON(bits_wanted >
38823 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
38824 mlog_errno(status);
38825 goto bail;
38826 }
38827 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38828 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38829
38830 *suballoc_loc = res.sr_bg_blkno;
38831 *suballoc_bit_start = res.sr_bit_offset;
38832 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
38833 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
38834 res->sr_bits);
38835
38836 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38837 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38838
38839 BUG_ON(res->sr_bits != 1);
38840
38841 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
38842 mlog_errno(status);
38843 goto bail;
38844 }
38845 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38846 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
38847
38848 BUG_ON(res.sr_bits != 1);
38849
38850 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
38851 cluster_start,
38852 num_clusters);
38853 if (!status)
38854 - atomic_inc(&osb->alloc_stats.local_data);
38855 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
38856 } else {
38857 if (min_clusters > (osb->bitmap_cpg - 1)) {
38858 /* The only paths asking for contiguousness
38859 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
38860 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
38861 res.sr_bg_blkno,
38862 res.sr_bit_offset);
38863 - atomic_inc(&osb->alloc_stats.bitmap_data);
38864 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
38865 *num_clusters = res.sr_bits;
38866 }
38867 }
38868 diff -urNp linux-3.0.3/fs/ocfs2/super.c linux-3.0.3/fs/ocfs2/super.c
38869 --- linux-3.0.3/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
38870 +++ linux-3.0.3/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
38871 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
38872 "%10s => GlobalAllocs: %d LocalAllocs: %d "
38873 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
38874 "Stats",
38875 - atomic_read(&osb->alloc_stats.bitmap_data),
38876 - atomic_read(&osb->alloc_stats.local_data),
38877 - atomic_read(&osb->alloc_stats.bg_allocs),
38878 - atomic_read(&osb->alloc_stats.moves),
38879 - atomic_read(&osb->alloc_stats.bg_extends));
38880 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
38881 + atomic_read_unchecked(&osb->alloc_stats.local_data),
38882 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
38883 + atomic_read_unchecked(&osb->alloc_stats.moves),
38884 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
38885
38886 out += snprintf(buf + out, len - out,
38887 "%10s => State: %u Descriptor: %llu Size: %u bits "
38888 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
38889 spin_lock_init(&osb->osb_xattr_lock);
38890 ocfs2_init_steal_slots(osb);
38891
38892 - atomic_set(&osb->alloc_stats.moves, 0);
38893 - atomic_set(&osb->alloc_stats.local_data, 0);
38894 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
38895 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
38896 - atomic_set(&osb->alloc_stats.bg_extends, 0);
38897 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
38898 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
38899 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
38900 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
38901 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
38902
38903 /* Copy the blockcheck stats from the superblock probe */
38904 osb->osb_ecc_stats = *stats;
38905 diff -urNp linux-3.0.3/fs/ocfs2/symlink.c linux-3.0.3/fs/ocfs2/symlink.c
38906 --- linux-3.0.3/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
38907 +++ linux-3.0.3/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
38908 @@ -142,7 +142,7 @@ bail:
38909
38910 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
38911 {
38912 - char *link = nd_get_link(nd);
38913 + const char *link = nd_get_link(nd);
38914 if (!IS_ERR(link))
38915 kfree(link);
38916 }
38917 diff -urNp linux-3.0.3/fs/open.c linux-3.0.3/fs/open.c
38918 --- linux-3.0.3/fs/open.c 2011-07-21 22:17:23.000000000 -0400
38919 +++ linux-3.0.3/fs/open.c 2011-08-23 21:48:14.000000000 -0400
38920 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
38921 error = locks_verify_truncate(inode, NULL, length);
38922 if (!error)
38923 error = security_path_truncate(&path);
38924 +
38925 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
38926 + error = -EACCES;
38927 +
38928 if (!error)
38929 error = do_truncate(path.dentry, length, 0, NULL);
38930
38931 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
38932 if (__mnt_is_readonly(path.mnt))
38933 res = -EROFS;
38934
38935 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
38936 + res = -EACCES;
38937 +
38938 out_path_release:
38939 path_put(&path);
38940 out:
38941 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
38942 if (error)
38943 goto dput_and_out;
38944
38945 + gr_log_chdir(path.dentry, path.mnt);
38946 +
38947 set_fs_pwd(current->fs, &path);
38948
38949 dput_and_out:
38950 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
38951 goto out_putf;
38952
38953 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
38954 +
38955 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
38956 + error = -EPERM;
38957 +
38958 + if (!error)
38959 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
38960 +
38961 if (!error)
38962 set_fs_pwd(current->fs, &file->f_path);
38963 out_putf:
38964 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
38965 if (error)
38966 goto dput_and_out;
38967
38968 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
38969 + goto dput_and_out;
38970 +
38971 + if (gr_handle_chroot_caps(&path)) {
38972 + error = -ENOMEM;
38973 + goto dput_and_out;
38974 + }
38975 +
38976 set_fs_root(current->fs, &path);
38977 +
38978 + gr_handle_chroot_chdir(&path);
38979 +
38980 error = 0;
38981 dput_and_out:
38982 path_put(&path);
38983 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
38984 err = mnt_want_write_file(file);
38985 if (err)
38986 goto out_putf;
38987 +
38988 mutex_lock(&inode->i_mutex);
38989 +
38990 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
38991 + err = -EACCES;
38992 + goto out_unlock;
38993 + }
38994 +
38995 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
38996 if (err)
38997 goto out_unlock;
38998 if (mode == (mode_t) -1)
38999 mode = inode->i_mode;
39000 +
39001 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39002 + err = -EACCES;
39003 + goto out_unlock;
39004 + }
39005 +
39006 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39007 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39008 err = notify_change(dentry, &newattrs);
39009 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39010 error = mnt_want_write(path.mnt);
39011 if (error)
39012 goto dput_and_out;
39013 +
39014 mutex_lock(&inode->i_mutex);
39015 +
39016 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39017 + error = -EACCES;
39018 + goto out_unlock;
39019 + }
39020 +
39021 error = security_path_chmod(path.dentry, path.mnt, mode);
39022 if (error)
39023 goto out_unlock;
39024 if (mode == (mode_t) -1)
39025 mode = inode->i_mode;
39026 +
39027 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39028 + error = -EACCES;
39029 + goto out_unlock;
39030 + }
39031 +
39032 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39033 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39034 error = notify_change(path.dentry, &newattrs);
39035 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39036 int error;
39037 struct iattr newattrs;
39038
39039 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39040 + return -EACCES;
39041 +
39042 newattrs.ia_valid = ATTR_CTIME;
39043 if (user != (uid_t) -1) {
39044 newattrs.ia_valid |= ATTR_UID;
39045 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39046 if (!IS_ERR(tmp)) {
39047 fd = get_unused_fd_flags(flags);
39048 if (fd >= 0) {
39049 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39050 + struct file *f;
39051 + /* don't allow to be set by userland */
39052 + flags &= ~FMODE_GREXEC;
39053 + f = do_filp_open(dfd, tmp, &op, lookup);
39054 if (IS_ERR(f)) {
39055 put_unused_fd(fd);
39056 fd = PTR_ERR(f);
39057 diff -urNp linux-3.0.3/fs/partitions/ldm.c linux-3.0.3/fs/partitions/ldm.c
39058 --- linux-3.0.3/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39059 +++ linux-3.0.3/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39060 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39061 ldm_error ("A VBLK claims to have %d parts.", num);
39062 return false;
39063 }
39064 +
39065 if (rec >= num) {
39066 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39067 return false;
39068 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39069 goto found;
39070 }
39071
39072 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39073 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39074 if (!f) {
39075 ldm_crit ("Out of memory.");
39076 return false;
39077 diff -urNp linux-3.0.3/fs/pipe.c linux-3.0.3/fs/pipe.c
39078 --- linux-3.0.3/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39079 +++ linux-3.0.3/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39080 @@ -420,9 +420,9 @@ redo:
39081 }
39082 if (bufs) /* More to do? */
39083 continue;
39084 - if (!pipe->writers)
39085 + if (!atomic_read(&pipe->writers))
39086 break;
39087 - if (!pipe->waiting_writers) {
39088 + if (!atomic_read(&pipe->waiting_writers)) {
39089 /* syscall merging: Usually we must not sleep
39090 * if O_NONBLOCK is set, or if we got some data.
39091 * But if a writer sleeps in kernel space, then
39092 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39093 mutex_lock(&inode->i_mutex);
39094 pipe = inode->i_pipe;
39095
39096 - if (!pipe->readers) {
39097 + if (!atomic_read(&pipe->readers)) {
39098 send_sig(SIGPIPE, current, 0);
39099 ret = -EPIPE;
39100 goto out;
39101 @@ -530,7 +530,7 @@ redo1:
39102 for (;;) {
39103 int bufs;
39104
39105 - if (!pipe->readers) {
39106 + if (!atomic_read(&pipe->readers)) {
39107 send_sig(SIGPIPE, current, 0);
39108 if (!ret)
39109 ret = -EPIPE;
39110 @@ -616,9 +616,9 @@ redo2:
39111 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39112 do_wakeup = 0;
39113 }
39114 - pipe->waiting_writers++;
39115 + atomic_inc(&pipe->waiting_writers);
39116 pipe_wait(pipe);
39117 - pipe->waiting_writers--;
39118 + atomic_dec(&pipe->waiting_writers);
39119 }
39120 out:
39121 mutex_unlock(&inode->i_mutex);
39122 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39123 mask = 0;
39124 if (filp->f_mode & FMODE_READ) {
39125 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39126 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39127 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39128 mask |= POLLHUP;
39129 }
39130
39131 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39132 * Most Unices do not set POLLERR for FIFOs but on Linux they
39133 * behave exactly like pipes for poll().
39134 */
39135 - if (!pipe->readers)
39136 + if (!atomic_read(&pipe->readers))
39137 mask |= POLLERR;
39138 }
39139
39140 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39141
39142 mutex_lock(&inode->i_mutex);
39143 pipe = inode->i_pipe;
39144 - pipe->readers -= decr;
39145 - pipe->writers -= decw;
39146 + atomic_sub(decr, &pipe->readers);
39147 + atomic_sub(decw, &pipe->writers);
39148
39149 - if (!pipe->readers && !pipe->writers) {
39150 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39151 free_pipe_info(inode);
39152 } else {
39153 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39154 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39155
39156 if (inode->i_pipe) {
39157 ret = 0;
39158 - inode->i_pipe->readers++;
39159 + atomic_inc(&inode->i_pipe->readers);
39160 }
39161
39162 mutex_unlock(&inode->i_mutex);
39163 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39164
39165 if (inode->i_pipe) {
39166 ret = 0;
39167 - inode->i_pipe->writers++;
39168 + atomic_inc(&inode->i_pipe->writers);
39169 }
39170
39171 mutex_unlock(&inode->i_mutex);
39172 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39173 if (inode->i_pipe) {
39174 ret = 0;
39175 if (filp->f_mode & FMODE_READ)
39176 - inode->i_pipe->readers++;
39177 + atomic_inc(&inode->i_pipe->readers);
39178 if (filp->f_mode & FMODE_WRITE)
39179 - inode->i_pipe->writers++;
39180 + atomic_inc(&inode->i_pipe->writers);
39181 }
39182
39183 mutex_unlock(&inode->i_mutex);
39184 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39185 inode->i_pipe = NULL;
39186 }
39187
39188 -static struct vfsmount *pipe_mnt __read_mostly;
39189 +struct vfsmount *pipe_mnt __read_mostly;
39190
39191 /*
39192 * pipefs_dname() is called from d_path().
39193 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39194 goto fail_iput;
39195 inode->i_pipe = pipe;
39196
39197 - pipe->readers = pipe->writers = 1;
39198 + atomic_set(&pipe->readers, 1);
39199 + atomic_set(&pipe->writers, 1);
39200 inode->i_fop = &rdwr_pipefifo_fops;
39201
39202 /*
39203 diff -urNp linux-3.0.3/fs/proc/array.c linux-3.0.3/fs/proc/array.c
39204 --- linux-3.0.3/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39205 +++ linux-3.0.3/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39206 @@ -60,6 +60,7 @@
39207 #include <linux/tty.h>
39208 #include <linux/string.h>
39209 #include <linux/mman.h>
39210 +#include <linux/grsecurity.h>
39211 #include <linux/proc_fs.h>
39212 #include <linux/ioport.h>
39213 #include <linux/uaccess.h>
39214 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39215 seq_putc(m, '\n');
39216 }
39217
39218 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39219 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39220 +{
39221 + if (p->mm)
39222 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39223 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39224 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39225 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39226 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39227 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39228 + else
39229 + seq_printf(m, "PaX:\t-----\n");
39230 +}
39231 +#endif
39232 +
39233 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39234 struct pid *pid, struct task_struct *task)
39235 {
39236 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39237 task_cpus_allowed(m, task);
39238 cpuset_task_status_allowed(m, task);
39239 task_context_switch_counts(m, task);
39240 +
39241 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39242 + task_pax(m, task);
39243 +#endif
39244 +
39245 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39246 + task_grsec_rbac(m, task);
39247 +#endif
39248 +
39249 return 0;
39250 }
39251
39252 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39253 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39254 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39255 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39256 +#endif
39257 +
39258 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39259 struct pid *pid, struct task_struct *task, int whole)
39260 {
39261 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39262 cputime_t cutime, cstime, utime, stime;
39263 cputime_t cgtime, gtime;
39264 unsigned long rsslim = 0;
39265 - char tcomm[sizeof(task->comm)];
39266 + char tcomm[sizeof(task->comm)] = { 0 };
39267 unsigned long flags;
39268
39269 + pax_track_stack();
39270 +
39271 state = *get_task_state(task);
39272 vsize = eip = esp = 0;
39273 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39274 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39275 gtime = task->gtime;
39276 }
39277
39278 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39279 + if (PAX_RAND_FLAGS(mm)) {
39280 + eip = 0;
39281 + esp = 0;
39282 + wchan = 0;
39283 + }
39284 +#endif
39285 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39286 + wchan = 0;
39287 + eip =0;
39288 + esp =0;
39289 +#endif
39290 +
39291 /* scale priority and nice values from timeslices to -20..20 */
39292 /* to make it look like a "normal" Unix priority/nice value */
39293 priority = task_prio(task);
39294 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39295 vsize,
39296 mm ? get_mm_rss(mm) : 0,
39297 rsslim,
39298 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39299 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39300 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39301 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39302 +#else
39303 mm ? (permitted ? mm->start_code : 1) : 0,
39304 mm ? (permitted ? mm->end_code : 1) : 0,
39305 (permitted && mm) ? mm->start_stack : 0,
39306 +#endif
39307 esp,
39308 eip,
39309 /* The signal information here is obsolete.
39310 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39311
39312 return 0;
39313 }
39314 +
39315 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39316 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39317 +{
39318 + u32 curr_ip = 0;
39319 + unsigned long flags;
39320 +
39321 + if (lock_task_sighand(task, &flags)) {
39322 + curr_ip = task->signal->curr_ip;
39323 + unlock_task_sighand(task, &flags);
39324 + }
39325 +
39326 + return sprintf(buffer, "%pI4\n", &curr_ip);
39327 +}
39328 +#endif
39329 diff -urNp linux-3.0.3/fs/proc/base.c linux-3.0.3/fs/proc/base.c
39330 --- linux-3.0.3/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39331 +++ linux-3.0.3/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39332 @@ -107,6 +107,22 @@ struct pid_entry {
39333 union proc_op op;
39334 };
39335
39336 +struct getdents_callback {
39337 + struct linux_dirent __user * current_dir;
39338 + struct linux_dirent __user * previous;
39339 + struct file * file;
39340 + int count;
39341 + int error;
39342 +};
39343 +
39344 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39345 + loff_t offset, u64 ino, unsigned int d_type)
39346 +{
39347 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39348 + buf->error = -EINVAL;
39349 + return 0;
39350 +}
39351 +
39352 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39353 .name = (NAME), \
39354 .len = sizeof(NAME) - 1, \
39355 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39356 if (task == current)
39357 return mm;
39358
39359 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39360 + return ERR_PTR(-EPERM);
39361 +
39362 /*
39363 * If current is actively ptrace'ing, and would also be
39364 * permitted to freshly attach with ptrace now, permit it.
39365 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39366 if (!mm->arg_end)
39367 goto out_mm; /* Shh! No looking before we're done */
39368
39369 + if (gr_acl_handle_procpidmem(task))
39370 + goto out_mm;
39371 +
39372 len = mm->arg_end - mm->arg_start;
39373
39374 if (len > PAGE_SIZE)
39375 @@ -309,12 +331,28 @@ out:
39376 return res;
39377 }
39378
39379 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39380 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39381 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39382 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39383 +#endif
39384 +
39385 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39386 {
39387 struct mm_struct *mm = mm_for_maps(task);
39388 int res = PTR_ERR(mm);
39389 if (mm && !IS_ERR(mm)) {
39390 unsigned int nwords = 0;
39391 +
39392 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39393 + /* allow if we're currently ptracing this task */
39394 + if (PAX_RAND_FLAGS(mm) &&
39395 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39396 + mmput(mm);
39397 + return res;
39398 + }
39399 +#endif
39400 +
39401 do {
39402 nwords += 2;
39403 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39404 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39405 }
39406
39407
39408 -#ifdef CONFIG_KALLSYMS
39409 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39410 /*
39411 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39412 * Returns the resolved symbol. If that fails, simply return the address.
39413 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39414 mutex_unlock(&task->signal->cred_guard_mutex);
39415 }
39416
39417 -#ifdef CONFIG_STACKTRACE
39418 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39419
39420 #define MAX_STACK_TRACE_DEPTH 64
39421
39422 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39423 return count;
39424 }
39425
39426 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39427 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39428 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39429 {
39430 long nr;
39431 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39432 /************************************************************************/
39433
39434 /* permission checks */
39435 -static int proc_fd_access_allowed(struct inode *inode)
39436 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39437 {
39438 struct task_struct *task;
39439 int allowed = 0;
39440 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39441 */
39442 task = get_proc_task(inode);
39443 if (task) {
39444 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39445 + if (log)
39446 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39447 + else
39448 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39449 put_task_struct(task);
39450 }
39451 return allowed;
39452 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39453 if (!task)
39454 goto out_no_task;
39455
39456 + if (gr_acl_handle_procpidmem(task))
39457 + goto out;
39458 +
39459 ret = -ENOMEM;
39460 page = (char *)__get_free_page(GFP_TEMPORARY);
39461 if (!page)
39462 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39463 path_put(&nd->path);
39464
39465 /* Are we allowed to snoop on the tasks file descriptors? */
39466 - if (!proc_fd_access_allowed(inode))
39467 + if (!proc_fd_access_allowed(inode,0))
39468 goto out;
39469
39470 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39471 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39472 struct path path;
39473
39474 /* Are we allowed to snoop on the tasks file descriptors? */
39475 - if (!proc_fd_access_allowed(inode))
39476 - goto out;
39477 + /* logging this is needed for learning on chromium to work properly,
39478 + but we don't want to flood the logs from 'ps' which does a readlink
39479 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39480 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39481 + */
39482 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39483 + if (!proc_fd_access_allowed(inode,0))
39484 + goto out;
39485 + } else {
39486 + if (!proc_fd_access_allowed(inode,1))
39487 + goto out;
39488 + }
39489
39490 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39491 if (error)
39492 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39493 rcu_read_lock();
39494 cred = __task_cred(task);
39495 inode->i_uid = cred->euid;
39496 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39497 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39498 +#else
39499 inode->i_gid = cred->egid;
39500 +#endif
39501 rcu_read_unlock();
39502 }
39503 security_task_to_inode(task, inode);
39504 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39505 struct inode *inode = dentry->d_inode;
39506 struct task_struct *task;
39507 const struct cred *cred;
39508 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39509 + const struct cred *tmpcred = current_cred();
39510 +#endif
39511
39512 generic_fillattr(inode, stat);
39513
39514 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39515 stat->uid = 0;
39516 stat->gid = 0;
39517 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39518 +
39519 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39520 + rcu_read_unlock();
39521 + return -ENOENT;
39522 + }
39523 +
39524 if (task) {
39525 + cred = __task_cred(task);
39526 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39527 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39528 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39529 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39530 +#endif
39531 + ) {
39532 +#endif
39533 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39534 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39535 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39536 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39537 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39538 +#endif
39539 task_dumpable(task)) {
39540 - cred = __task_cred(task);
39541 stat->uid = cred->euid;
39542 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39543 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39544 +#else
39545 stat->gid = cred->egid;
39546 +#endif
39547 }
39548 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39549 + } else {
39550 + rcu_read_unlock();
39551 + return -ENOENT;
39552 + }
39553 +#endif
39554 }
39555 rcu_read_unlock();
39556 return 0;
39557 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39558
39559 if (task) {
39560 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39561 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39562 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39563 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39564 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39565 +#endif
39566 task_dumpable(task)) {
39567 rcu_read_lock();
39568 cred = __task_cred(task);
39569 inode->i_uid = cred->euid;
39570 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39571 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39572 +#else
39573 inode->i_gid = cred->egid;
39574 +#endif
39575 rcu_read_unlock();
39576 } else {
39577 inode->i_uid = 0;
39578 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39579 int fd = proc_fd(inode);
39580
39581 if (task) {
39582 - files = get_files_struct(task);
39583 + if (!gr_acl_handle_procpidmem(task))
39584 + files = get_files_struct(task);
39585 put_task_struct(task);
39586 }
39587 if (files) {
39588 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
39589 */
39590 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39591 {
39592 + struct task_struct *task;
39593 int rv = generic_permission(inode, mask, flags, NULL);
39594 - if (rv == 0)
39595 - return 0;
39596 +
39597 if (task_pid(current) == proc_pid(inode))
39598 rv = 0;
39599 +
39600 + task = get_proc_task(inode);
39601 + if (task == NULL)
39602 + return rv;
39603 +
39604 + if (gr_acl_handle_procpidmem(task))
39605 + rv = -EACCES;
39606 +
39607 + put_task_struct(task);
39608 +
39609 return rv;
39610 }
39611
39612 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39613 if (!task)
39614 goto out_no_task;
39615
39616 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39617 + goto out;
39618 +
39619 /*
39620 * Yes, it does not scale. And it should not. Don't add
39621 * new entries into /proc/<tgid>/ without very good reasons.
39622 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39623 if (!task)
39624 goto out_no_task;
39625
39626 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39627 + goto out;
39628 +
39629 ret = 0;
39630 i = filp->f_pos;
39631 switch (i) {
39632 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39633 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39634 void *cookie)
39635 {
39636 - char *s = nd_get_link(nd);
39637 + const char *s = nd_get_link(nd);
39638 if (!IS_ERR(s))
39639 __putname(s);
39640 }
39641 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39642 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39643 #endif
39644 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39645 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39646 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39647 INF("syscall", S_IRUGO, proc_pid_syscall),
39648 #endif
39649 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39650 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39651 #ifdef CONFIG_SECURITY
39652 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39653 #endif
39654 -#ifdef CONFIG_KALLSYMS
39655 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39656 INF("wchan", S_IRUGO, proc_pid_wchan),
39657 #endif
39658 -#ifdef CONFIG_STACKTRACE
39659 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39660 ONE("stack", S_IRUGO, proc_pid_stack),
39661 #endif
39662 #ifdef CONFIG_SCHEDSTATS
39663 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39664 #ifdef CONFIG_HARDWALL
39665 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39666 #endif
39667 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39668 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39669 +#endif
39670 };
39671
39672 static int proc_tgid_base_readdir(struct file * filp,
39673 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39674 if (!inode)
39675 goto out;
39676
39677 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39678 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39679 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39680 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39681 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39682 +#else
39683 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39684 +#endif
39685 inode->i_op = &proc_tgid_base_inode_operations;
39686 inode->i_fop = &proc_tgid_base_operations;
39687 inode->i_flags|=S_IMMUTABLE;
39688 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
39689 if (!task)
39690 goto out;
39691
39692 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39693 + goto out_put_task;
39694 +
39695 result = proc_pid_instantiate(dir, dentry, task, NULL);
39696 +out_put_task:
39697 put_task_struct(task);
39698 out:
39699 return result;
39700 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
39701 {
39702 unsigned int nr;
39703 struct task_struct *reaper;
39704 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39705 + const struct cred *tmpcred = current_cred();
39706 + const struct cred *itercred;
39707 +#endif
39708 + filldir_t __filldir = filldir;
39709 struct tgid_iter iter;
39710 struct pid_namespace *ns;
39711
39712 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
39713 for (iter = next_tgid(ns, iter);
39714 iter.task;
39715 iter.tgid += 1, iter = next_tgid(ns, iter)) {
39716 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39717 + rcu_read_lock();
39718 + itercred = __task_cred(iter.task);
39719 +#endif
39720 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
39721 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39722 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
39723 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39724 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39725 +#endif
39726 + )
39727 +#endif
39728 + )
39729 + __filldir = &gr_fake_filldir;
39730 + else
39731 + __filldir = filldir;
39732 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39733 + rcu_read_unlock();
39734 +#endif
39735 filp->f_pos = iter.tgid + TGID_OFFSET;
39736 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
39737 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
39738 put_task_struct(iter.task);
39739 goto out;
39740 }
39741 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
39742 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
39743 #endif
39744 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39745 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39746 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39747 INF("syscall", S_IRUGO, proc_pid_syscall),
39748 #endif
39749 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39750 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
39751 #ifdef CONFIG_SECURITY
39752 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39753 #endif
39754 -#ifdef CONFIG_KALLSYMS
39755 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39756 INF("wchan", S_IRUGO, proc_pid_wchan),
39757 #endif
39758 -#ifdef CONFIG_STACKTRACE
39759 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39760 ONE("stack", S_IRUGO, proc_pid_stack),
39761 #endif
39762 #ifdef CONFIG_SCHEDSTATS
39763 diff -urNp linux-3.0.3/fs/proc/cmdline.c linux-3.0.3/fs/proc/cmdline.c
39764 --- linux-3.0.3/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
39765 +++ linux-3.0.3/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
39766 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
39767
39768 static int __init proc_cmdline_init(void)
39769 {
39770 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
39771 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
39772 +#else
39773 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
39774 +#endif
39775 return 0;
39776 }
39777 module_init(proc_cmdline_init);
39778 diff -urNp linux-3.0.3/fs/proc/devices.c linux-3.0.3/fs/proc/devices.c
39779 --- linux-3.0.3/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
39780 +++ linux-3.0.3/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
39781 @@ -64,7 +64,11 @@ static const struct file_operations proc
39782
39783 static int __init proc_devices_init(void)
39784 {
39785 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
39786 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
39787 +#else
39788 proc_create("devices", 0, NULL, &proc_devinfo_operations);
39789 +#endif
39790 return 0;
39791 }
39792 module_init(proc_devices_init);
39793 diff -urNp linux-3.0.3/fs/proc/inode.c linux-3.0.3/fs/proc/inode.c
39794 --- linux-3.0.3/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
39795 +++ linux-3.0.3/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
39796 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
39797 if (de->mode) {
39798 inode->i_mode = de->mode;
39799 inode->i_uid = de->uid;
39800 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39801 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39802 +#else
39803 inode->i_gid = de->gid;
39804 +#endif
39805 }
39806 if (de->size)
39807 inode->i_size = de->size;
39808 diff -urNp linux-3.0.3/fs/proc/internal.h linux-3.0.3/fs/proc/internal.h
39809 --- linux-3.0.3/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
39810 +++ linux-3.0.3/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
39811 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
39812 struct pid *pid, struct task_struct *task);
39813 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
39814 struct pid *pid, struct task_struct *task);
39815 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39816 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
39817 +#endif
39818 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
39819
39820 extern const struct file_operations proc_maps_operations;
39821 diff -urNp linux-3.0.3/fs/proc/Kconfig linux-3.0.3/fs/proc/Kconfig
39822 --- linux-3.0.3/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
39823 +++ linux-3.0.3/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
39824 @@ -30,12 +30,12 @@ config PROC_FS
39825
39826 config PROC_KCORE
39827 bool "/proc/kcore support" if !ARM
39828 - depends on PROC_FS && MMU
39829 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
39830
39831 config PROC_VMCORE
39832 bool "/proc/vmcore support"
39833 - depends on PROC_FS && CRASH_DUMP
39834 - default y
39835 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
39836 + default n
39837 help
39838 Exports the dump image of crashed kernel in ELF format.
39839
39840 @@ -59,8 +59,8 @@ config PROC_SYSCTL
39841 limited in memory.
39842
39843 config PROC_PAGE_MONITOR
39844 - default y
39845 - depends on PROC_FS && MMU
39846 + default n
39847 + depends on PROC_FS && MMU && !GRKERNSEC
39848 bool "Enable /proc page monitoring" if EXPERT
39849 help
39850 Various /proc files exist to monitor process memory utilization:
39851 diff -urNp linux-3.0.3/fs/proc/kcore.c linux-3.0.3/fs/proc/kcore.c
39852 --- linux-3.0.3/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
39853 +++ linux-3.0.3/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
39854 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
39855 off_t offset = 0;
39856 struct kcore_list *m;
39857
39858 + pax_track_stack();
39859 +
39860 /* setup ELF header */
39861 elf = (struct elfhdr *) bufp;
39862 bufp += sizeof(struct elfhdr);
39863 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
39864 * the addresses in the elf_phdr on our list.
39865 */
39866 start = kc_offset_to_vaddr(*fpos - elf_buflen);
39867 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
39868 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
39869 + if (tsz > buflen)
39870 tsz = buflen;
39871 -
39872 +
39873 while (buflen) {
39874 struct kcore_list *m;
39875
39876 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
39877 kfree(elf_buf);
39878 } else {
39879 if (kern_addr_valid(start)) {
39880 - unsigned long n;
39881 + char *elf_buf;
39882 + mm_segment_t oldfs;
39883
39884 - n = copy_to_user(buffer, (char *)start, tsz);
39885 - /*
39886 - * We cannot distingush between fault on source
39887 - * and fault on destination. When this happens
39888 - * we clear too and hope it will trigger the
39889 - * EFAULT again.
39890 - */
39891 - if (n) {
39892 - if (clear_user(buffer + tsz - n,
39893 - n))
39894 + elf_buf = kmalloc(tsz, GFP_KERNEL);
39895 + if (!elf_buf)
39896 + return -ENOMEM;
39897 + oldfs = get_fs();
39898 + set_fs(KERNEL_DS);
39899 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
39900 + set_fs(oldfs);
39901 + if (copy_to_user(buffer, elf_buf, tsz)) {
39902 + kfree(elf_buf);
39903 return -EFAULT;
39904 + }
39905 }
39906 + set_fs(oldfs);
39907 + kfree(elf_buf);
39908 } else {
39909 if (clear_user(buffer, tsz))
39910 return -EFAULT;
39911 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
39912
39913 static int open_kcore(struct inode *inode, struct file *filp)
39914 {
39915 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
39916 + return -EPERM;
39917 +#endif
39918 if (!capable(CAP_SYS_RAWIO))
39919 return -EPERM;
39920 if (kcore_need_update)
39921 diff -urNp linux-3.0.3/fs/proc/meminfo.c linux-3.0.3/fs/proc/meminfo.c
39922 --- linux-3.0.3/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
39923 +++ linux-3.0.3/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
39924 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
39925 unsigned long pages[NR_LRU_LISTS];
39926 int lru;
39927
39928 + pax_track_stack();
39929 +
39930 /*
39931 * display in kilobytes.
39932 */
39933 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
39934 vmi.used >> 10,
39935 vmi.largest_chunk >> 10
39936 #ifdef CONFIG_MEMORY_FAILURE
39937 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
39938 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
39939 #endif
39940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
39941 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
39942 diff -urNp linux-3.0.3/fs/proc/nommu.c linux-3.0.3/fs/proc/nommu.c
39943 --- linux-3.0.3/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
39944 +++ linux-3.0.3/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
39945 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
39946 if (len < 1)
39947 len = 1;
39948 seq_printf(m, "%*c", len, ' ');
39949 - seq_path(m, &file->f_path, "");
39950 + seq_path(m, &file->f_path, "\n\\");
39951 }
39952
39953 seq_putc(m, '\n');
39954 diff -urNp linux-3.0.3/fs/proc/proc_net.c linux-3.0.3/fs/proc/proc_net.c
39955 --- linux-3.0.3/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
39956 +++ linux-3.0.3/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
39957 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
39958 struct task_struct *task;
39959 struct nsproxy *ns;
39960 struct net *net = NULL;
39961 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39962 + const struct cred *cred = current_cred();
39963 +#endif
39964 +
39965 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39966 + if (cred->fsuid)
39967 + return net;
39968 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39969 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
39970 + return net;
39971 +#endif
39972
39973 rcu_read_lock();
39974 task = pid_task(proc_pid(dir), PIDTYPE_PID);
39975 diff -urNp linux-3.0.3/fs/proc/proc_sysctl.c linux-3.0.3/fs/proc/proc_sysctl.c
39976 --- linux-3.0.3/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
39977 +++ linux-3.0.3/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
39978 @@ -8,6 +8,8 @@
39979 #include <linux/namei.h>
39980 #include "internal.h"
39981
39982 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
39983 +
39984 static const struct dentry_operations proc_sys_dentry_operations;
39985 static const struct file_operations proc_sys_file_operations;
39986 static const struct inode_operations proc_sys_inode_operations;
39987 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
39988 if (!p)
39989 goto out;
39990
39991 + if (gr_handle_sysctl(p, MAY_EXEC))
39992 + goto out;
39993 +
39994 err = ERR_PTR(-ENOMEM);
39995 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
39996 if (h)
39997 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
39998 if (*pos < file->f_pos)
39999 continue;
40000
40001 + if (gr_handle_sysctl(table, 0))
40002 + continue;
40003 +
40004 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40005 if (res)
40006 return res;
40007 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40008 if (IS_ERR(head))
40009 return PTR_ERR(head);
40010
40011 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40012 + return -ENOENT;
40013 +
40014 generic_fillattr(inode, stat);
40015 if (table)
40016 stat->mode = (stat->mode & S_IFMT) | table->mode;
40017 diff -urNp linux-3.0.3/fs/proc/root.c linux-3.0.3/fs/proc/root.c
40018 --- linux-3.0.3/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40019 +++ linux-3.0.3/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40020 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
40021 #ifdef CONFIG_PROC_DEVICETREE
40022 proc_device_tree_init();
40023 #endif
40024 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40025 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40026 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40027 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40028 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40029 +#endif
40030 +#else
40031 proc_mkdir("bus", NULL);
40032 +#endif
40033 proc_sys_init();
40034 }
40035
40036 diff -urNp linux-3.0.3/fs/proc/task_mmu.c linux-3.0.3/fs/proc/task_mmu.c
40037 --- linux-3.0.3/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40038 +++ linux-3.0.3/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40039 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40040 "VmExe:\t%8lu kB\n"
40041 "VmLib:\t%8lu kB\n"
40042 "VmPTE:\t%8lu kB\n"
40043 - "VmSwap:\t%8lu kB\n",
40044 - hiwater_vm << (PAGE_SHIFT-10),
40045 + "VmSwap:\t%8lu kB\n"
40046 +
40047 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40048 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40049 +#endif
40050 +
40051 + ,hiwater_vm << (PAGE_SHIFT-10),
40052 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40053 mm->locked_vm << (PAGE_SHIFT-10),
40054 hiwater_rss << (PAGE_SHIFT-10),
40055 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40056 data << (PAGE_SHIFT-10),
40057 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40058 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40059 - swap << (PAGE_SHIFT-10));
40060 + swap << (PAGE_SHIFT-10)
40061 +
40062 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40063 + , mm->context.user_cs_base, mm->context.user_cs_limit
40064 +#endif
40065 +
40066 + );
40067 }
40068
40069 unsigned long task_vsize(struct mm_struct *mm)
40070 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40071 return ret;
40072 }
40073
40074 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40075 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40076 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40077 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40078 +#endif
40079 +
40080 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40081 {
40082 struct mm_struct *mm = vma->vm_mm;
40083 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40084 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40085 }
40086
40087 - /* We don't show the stack guard page in /proc/maps */
40088 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40089 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40090 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40091 +#else
40092 start = vma->vm_start;
40093 - if (stack_guard_page_start(vma, start))
40094 - start += PAGE_SIZE;
40095 end = vma->vm_end;
40096 - if (stack_guard_page_end(vma, end))
40097 - end -= PAGE_SIZE;
40098 +#endif
40099
40100 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40101 start,
40102 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40103 flags & VM_WRITE ? 'w' : '-',
40104 flags & VM_EXEC ? 'x' : '-',
40105 flags & VM_MAYSHARE ? 's' : 'p',
40106 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40107 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40108 +#else
40109 pgoff,
40110 +#endif
40111 MAJOR(dev), MINOR(dev), ino, &len);
40112
40113 /*
40114 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40115 */
40116 if (file) {
40117 pad_len_spaces(m, len);
40118 - seq_path(m, &file->f_path, "\n");
40119 + seq_path(m, &file->f_path, "\n\\");
40120 } else {
40121 const char *name = arch_vma_name(vma);
40122 if (!name) {
40123 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40124 if (vma->vm_start <= mm->brk &&
40125 vma->vm_end >= mm->start_brk) {
40126 name = "[heap]";
40127 - } else if (vma->vm_start <= mm->start_stack &&
40128 - vma->vm_end >= mm->start_stack) {
40129 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40130 + (vma->vm_start <= mm->start_stack &&
40131 + vma->vm_end >= mm->start_stack)) {
40132 name = "[stack]";
40133 }
40134 } else {
40135 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40136 };
40137
40138 memset(&mss, 0, sizeof mss);
40139 - mss.vma = vma;
40140 - /* mmap_sem is held in m_start */
40141 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40142 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40143 -
40144 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40145 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40146 +#endif
40147 + mss.vma = vma;
40148 + /* mmap_sem is held in m_start */
40149 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40150 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40151 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40152 + }
40153 +#endif
40154 show_map_vma(m, vma);
40155
40156 seq_printf(m,
40157 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40158 "KernelPageSize: %8lu kB\n"
40159 "MMUPageSize: %8lu kB\n"
40160 "Locked: %8lu kB\n",
40161 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40162 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40163 +#else
40164 (vma->vm_end - vma->vm_start) >> 10,
40165 +#endif
40166 mss.resident >> 10,
40167 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40168 mss.shared_clean >> 10,
40169 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40170
40171 if (file) {
40172 seq_printf(m, " file=");
40173 - seq_path(m, &file->f_path, "\n\t= ");
40174 + seq_path(m, &file->f_path, "\n\t\\= ");
40175 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40176 seq_printf(m, " heap");
40177 } else if (vma->vm_start <= mm->start_stack &&
40178 diff -urNp linux-3.0.3/fs/proc/task_nommu.c linux-3.0.3/fs/proc/task_nommu.c
40179 --- linux-3.0.3/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40180 +++ linux-3.0.3/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40181 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40182 else
40183 bytes += kobjsize(mm);
40184
40185 - if (current->fs && current->fs->users > 1)
40186 + if (current->fs && atomic_read(&current->fs->users) > 1)
40187 sbytes += kobjsize(current->fs);
40188 else
40189 bytes += kobjsize(current->fs);
40190 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40191
40192 if (file) {
40193 pad_len_spaces(m, len);
40194 - seq_path(m, &file->f_path, "");
40195 + seq_path(m, &file->f_path, "\n\\");
40196 } else if (mm) {
40197 if (vma->vm_start <= mm->start_stack &&
40198 vma->vm_end >= mm->start_stack) {
40199 diff -urNp linux-3.0.3/fs/quota/netlink.c linux-3.0.3/fs/quota/netlink.c
40200 --- linux-3.0.3/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40201 +++ linux-3.0.3/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40202 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40203 void quota_send_warning(short type, unsigned int id, dev_t dev,
40204 const char warntype)
40205 {
40206 - static atomic_t seq;
40207 + static atomic_unchecked_t seq;
40208 struct sk_buff *skb;
40209 void *msg_head;
40210 int ret;
40211 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40212 "VFS: Not enough memory to send quota warning.\n");
40213 return;
40214 }
40215 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40216 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40217 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40218 if (!msg_head) {
40219 printk(KERN_ERR
40220 diff -urNp linux-3.0.3/fs/readdir.c linux-3.0.3/fs/readdir.c
40221 --- linux-3.0.3/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40222 +++ linux-3.0.3/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40223 @@ -17,6 +17,7 @@
40224 #include <linux/security.h>
40225 #include <linux/syscalls.h>
40226 #include <linux/unistd.h>
40227 +#include <linux/namei.h>
40228
40229 #include <asm/uaccess.h>
40230
40231 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40232
40233 struct readdir_callback {
40234 struct old_linux_dirent __user * dirent;
40235 + struct file * file;
40236 int result;
40237 };
40238
40239 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40240 buf->result = -EOVERFLOW;
40241 return -EOVERFLOW;
40242 }
40243 +
40244 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40245 + return 0;
40246 +
40247 buf->result++;
40248 dirent = buf->dirent;
40249 if (!access_ok(VERIFY_WRITE, dirent,
40250 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40251
40252 buf.result = 0;
40253 buf.dirent = dirent;
40254 + buf.file = file;
40255
40256 error = vfs_readdir(file, fillonedir, &buf);
40257 if (buf.result)
40258 @@ -142,6 +149,7 @@ struct linux_dirent {
40259 struct getdents_callback {
40260 struct linux_dirent __user * current_dir;
40261 struct linux_dirent __user * previous;
40262 + struct file * file;
40263 int count;
40264 int error;
40265 };
40266 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40267 buf->error = -EOVERFLOW;
40268 return -EOVERFLOW;
40269 }
40270 +
40271 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40272 + return 0;
40273 +
40274 dirent = buf->previous;
40275 if (dirent) {
40276 if (__put_user(offset, &dirent->d_off))
40277 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40278 buf.previous = NULL;
40279 buf.count = count;
40280 buf.error = 0;
40281 + buf.file = file;
40282
40283 error = vfs_readdir(file, filldir, &buf);
40284 if (error >= 0)
40285 @@ -229,6 +242,7 @@ out:
40286 struct getdents_callback64 {
40287 struct linux_dirent64 __user * current_dir;
40288 struct linux_dirent64 __user * previous;
40289 + struct file *file;
40290 int count;
40291 int error;
40292 };
40293 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40294 buf->error = -EINVAL; /* only used if we fail.. */
40295 if (reclen > buf->count)
40296 return -EINVAL;
40297 +
40298 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40299 + return 0;
40300 +
40301 dirent = buf->previous;
40302 if (dirent) {
40303 if (__put_user(offset, &dirent->d_off))
40304 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40305
40306 buf.current_dir = dirent;
40307 buf.previous = NULL;
40308 + buf.file = file;
40309 buf.count = count;
40310 buf.error = 0;
40311
40312 diff -urNp linux-3.0.3/fs/reiserfs/dir.c linux-3.0.3/fs/reiserfs/dir.c
40313 --- linux-3.0.3/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40314 +++ linux-3.0.3/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40315 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40316 struct reiserfs_dir_entry de;
40317 int ret = 0;
40318
40319 + pax_track_stack();
40320 +
40321 reiserfs_write_lock(inode->i_sb);
40322
40323 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40324 diff -urNp linux-3.0.3/fs/reiserfs/do_balan.c linux-3.0.3/fs/reiserfs/do_balan.c
40325 --- linux-3.0.3/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40326 +++ linux-3.0.3/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40327 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40328 return;
40329 }
40330
40331 - atomic_inc(&(fs_generation(tb->tb_sb)));
40332 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40333 do_balance_starts(tb);
40334
40335 /* balance leaf returns 0 except if combining L R and S into
40336 diff -urNp linux-3.0.3/fs/reiserfs/journal.c linux-3.0.3/fs/reiserfs/journal.c
40337 --- linux-3.0.3/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40338 +++ linux-3.0.3/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40339 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40340 struct buffer_head *bh;
40341 int i, j;
40342
40343 + pax_track_stack();
40344 +
40345 bh = __getblk(dev, block, bufsize);
40346 if (buffer_uptodate(bh))
40347 return (bh);
40348 diff -urNp linux-3.0.3/fs/reiserfs/namei.c linux-3.0.3/fs/reiserfs/namei.c
40349 --- linux-3.0.3/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40350 +++ linux-3.0.3/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40351 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40352 unsigned long savelink = 1;
40353 struct timespec ctime;
40354
40355 + pax_track_stack();
40356 +
40357 /* three balancings: (1) old name removal, (2) new name insertion
40358 and (3) maybe "save" link insertion
40359 stat data updates: (1) old directory,
40360 diff -urNp linux-3.0.3/fs/reiserfs/procfs.c linux-3.0.3/fs/reiserfs/procfs.c
40361 --- linux-3.0.3/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40362 +++ linux-3.0.3/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40363 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40364 "SMALL_TAILS " : "NO_TAILS ",
40365 replay_only(sb) ? "REPLAY_ONLY " : "",
40366 convert_reiserfs(sb) ? "CONV " : "",
40367 - atomic_read(&r->s_generation_counter),
40368 + atomic_read_unchecked(&r->s_generation_counter),
40369 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40370 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40371 SF(s_good_search_by_key_reada), SF(s_bmaps),
40372 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40373 struct journal_params *jp = &rs->s_v1.s_journal;
40374 char b[BDEVNAME_SIZE];
40375
40376 + pax_track_stack();
40377 +
40378 seq_printf(m, /* on-disk fields */
40379 "jp_journal_1st_block: \t%i\n"
40380 "jp_journal_dev: \t%s[%x]\n"
40381 diff -urNp linux-3.0.3/fs/reiserfs/stree.c linux-3.0.3/fs/reiserfs/stree.c
40382 --- linux-3.0.3/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40383 +++ linux-3.0.3/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40384 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40385 int iter = 0;
40386 #endif
40387
40388 + pax_track_stack();
40389 +
40390 BUG_ON(!th->t_trans_id);
40391
40392 init_tb_struct(th, &s_del_balance, sb, path,
40393 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40394 int retval;
40395 int quota_cut_bytes = 0;
40396
40397 + pax_track_stack();
40398 +
40399 BUG_ON(!th->t_trans_id);
40400
40401 le_key2cpu_key(&cpu_key, key);
40402 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40403 int quota_cut_bytes;
40404 loff_t tail_pos = 0;
40405
40406 + pax_track_stack();
40407 +
40408 BUG_ON(!th->t_trans_id);
40409
40410 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40411 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40412 int retval;
40413 int fs_gen;
40414
40415 + pax_track_stack();
40416 +
40417 BUG_ON(!th->t_trans_id);
40418
40419 fs_gen = get_generation(inode->i_sb);
40420 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40421 int fs_gen = 0;
40422 int quota_bytes = 0;
40423
40424 + pax_track_stack();
40425 +
40426 BUG_ON(!th->t_trans_id);
40427
40428 if (inode) { /* Do we count quotas for item? */
40429 diff -urNp linux-3.0.3/fs/reiserfs/super.c linux-3.0.3/fs/reiserfs/super.c
40430 --- linux-3.0.3/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40431 +++ linux-3.0.3/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40432 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40433 {.option_name = NULL}
40434 };
40435
40436 + pax_track_stack();
40437 +
40438 *blocks = 0;
40439 if (!options || !*options)
40440 /* use default configuration: create tails, journaling on, no
40441 diff -urNp linux-3.0.3/fs/select.c linux-3.0.3/fs/select.c
40442 --- linux-3.0.3/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40443 +++ linux-3.0.3/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40444 @@ -20,6 +20,7 @@
40445 #include <linux/module.h>
40446 #include <linux/slab.h>
40447 #include <linux/poll.h>
40448 +#include <linux/security.h>
40449 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40450 #include <linux/file.h>
40451 #include <linux/fdtable.h>
40452 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40453 int retval, i, timed_out = 0;
40454 unsigned long slack = 0;
40455
40456 + pax_track_stack();
40457 +
40458 rcu_read_lock();
40459 retval = max_select_fd(n, fds);
40460 rcu_read_unlock();
40461 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40462 /* Allocate small arguments on the stack to save memory and be faster */
40463 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40464
40465 + pax_track_stack();
40466 +
40467 ret = -EINVAL;
40468 if (n < 0)
40469 goto out_nofds;
40470 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40471 struct poll_list *walk = head;
40472 unsigned long todo = nfds;
40473
40474 + pax_track_stack();
40475 +
40476 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40477 if (nfds > rlimit(RLIMIT_NOFILE))
40478 return -EINVAL;
40479
40480 diff -urNp linux-3.0.3/fs/seq_file.c linux-3.0.3/fs/seq_file.c
40481 --- linux-3.0.3/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40482 +++ linux-3.0.3/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40483 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40484 return 0;
40485 }
40486 if (!m->buf) {
40487 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40488 + m->size = PAGE_SIZE;
40489 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40490 if (!m->buf)
40491 return -ENOMEM;
40492 }
40493 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40494 Eoverflow:
40495 m->op->stop(m, p);
40496 kfree(m->buf);
40497 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40498 + m->size <<= 1;
40499 + m->buf = kmalloc(m->size, GFP_KERNEL);
40500 return !m->buf ? -ENOMEM : -EAGAIN;
40501 }
40502
40503 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40504 m->version = file->f_version;
40505 /* grab buffer if we didn't have one */
40506 if (!m->buf) {
40507 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40508 + m->size = PAGE_SIZE;
40509 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40510 if (!m->buf)
40511 goto Enomem;
40512 }
40513 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40514 goto Fill;
40515 m->op->stop(m, p);
40516 kfree(m->buf);
40517 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40518 + m->size <<= 1;
40519 + m->buf = kmalloc(m->size, GFP_KERNEL);
40520 if (!m->buf)
40521 goto Enomem;
40522 m->count = 0;
40523 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40524 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40525 void *data)
40526 {
40527 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40528 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40529 int res = -ENOMEM;
40530
40531 if (op) {
40532 diff -urNp linux-3.0.3/fs/splice.c linux-3.0.3/fs/splice.c
40533 --- linux-3.0.3/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40534 +++ linux-3.0.3/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40535 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40536 pipe_lock(pipe);
40537
40538 for (;;) {
40539 - if (!pipe->readers) {
40540 + if (!atomic_read(&pipe->readers)) {
40541 send_sig(SIGPIPE, current, 0);
40542 if (!ret)
40543 ret = -EPIPE;
40544 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40545 do_wakeup = 0;
40546 }
40547
40548 - pipe->waiting_writers++;
40549 + atomic_inc(&pipe->waiting_writers);
40550 pipe_wait(pipe);
40551 - pipe->waiting_writers--;
40552 + atomic_dec(&pipe->waiting_writers);
40553 }
40554
40555 pipe_unlock(pipe);
40556 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40557 .spd_release = spd_release_page,
40558 };
40559
40560 + pax_track_stack();
40561 +
40562 if (splice_grow_spd(pipe, &spd))
40563 return -ENOMEM;
40564
40565 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40566 old_fs = get_fs();
40567 set_fs(get_ds());
40568 /* The cast to a user pointer is valid due to the set_fs() */
40569 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40570 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40571 set_fs(old_fs);
40572
40573 return res;
40574 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40575 old_fs = get_fs();
40576 set_fs(get_ds());
40577 /* The cast to a user pointer is valid due to the set_fs() */
40578 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40579 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40580 set_fs(old_fs);
40581
40582 return res;
40583 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40584 .spd_release = spd_release_page,
40585 };
40586
40587 + pax_track_stack();
40588 +
40589 if (splice_grow_spd(pipe, &spd))
40590 return -ENOMEM;
40591
40592 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40593 goto err;
40594
40595 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40596 - vec[i].iov_base = (void __user *) page_address(page);
40597 + vec[i].iov_base = (__force void __user *) page_address(page);
40598 vec[i].iov_len = this_len;
40599 spd.pages[i] = page;
40600 spd.nr_pages++;
40601 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40602 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40603 {
40604 while (!pipe->nrbufs) {
40605 - if (!pipe->writers)
40606 + if (!atomic_read(&pipe->writers))
40607 return 0;
40608
40609 - if (!pipe->waiting_writers && sd->num_spliced)
40610 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40611 return 0;
40612
40613 if (sd->flags & SPLICE_F_NONBLOCK)
40614 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40615 * out of the pipe right after the splice_to_pipe(). So set
40616 * PIPE_READERS appropriately.
40617 */
40618 - pipe->readers = 1;
40619 + atomic_set(&pipe->readers, 1);
40620
40621 current->splice_pipe = pipe;
40622 }
40623 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40624 };
40625 long ret;
40626
40627 + pax_track_stack();
40628 +
40629 pipe = get_pipe_info(file);
40630 if (!pipe)
40631 return -EBADF;
40632 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40633 ret = -ERESTARTSYS;
40634 break;
40635 }
40636 - if (!pipe->writers)
40637 + if (!atomic_read(&pipe->writers))
40638 break;
40639 - if (!pipe->waiting_writers) {
40640 + if (!atomic_read(&pipe->waiting_writers)) {
40641 if (flags & SPLICE_F_NONBLOCK) {
40642 ret = -EAGAIN;
40643 break;
40644 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40645 pipe_lock(pipe);
40646
40647 while (pipe->nrbufs >= pipe->buffers) {
40648 - if (!pipe->readers) {
40649 + if (!atomic_read(&pipe->readers)) {
40650 send_sig(SIGPIPE, current, 0);
40651 ret = -EPIPE;
40652 break;
40653 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40654 ret = -ERESTARTSYS;
40655 break;
40656 }
40657 - pipe->waiting_writers++;
40658 + atomic_inc(&pipe->waiting_writers);
40659 pipe_wait(pipe);
40660 - pipe->waiting_writers--;
40661 + atomic_dec(&pipe->waiting_writers);
40662 }
40663
40664 pipe_unlock(pipe);
40665 @@ -1819,14 +1825,14 @@ retry:
40666 pipe_double_lock(ipipe, opipe);
40667
40668 do {
40669 - if (!opipe->readers) {
40670 + if (!atomic_read(&opipe->readers)) {
40671 send_sig(SIGPIPE, current, 0);
40672 if (!ret)
40673 ret = -EPIPE;
40674 break;
40675 }
40676
40677 - if (!ipipe->nrbufs && !ipipe->writers)
40678 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40679 break;
40680
40681 /*
40682 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
40683 pipe_double_lock(ipipe, opipe);
40684
40685 do {
40686 - if (!opipe->readers) {
40687 + if (!atomic_read(&opipe->readers)) {
40688 send_sig(SIGPIPE, current, 0);
40689 if (!ret)
40690 ret = -EPIPE;
40691 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
40692 * return EAGAIN if we have the potential of some data in the
40693 * future, otherwise just return 0
40694 */
40695 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
40696 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
40697 ret = -EAGAIN;
40698
40699 pipe_unlock(ipipe);
40700 diff -urNp linux-3.0.3/fs/sysfs/file.c linux-3.0.3/fs/sysfs/file.c
40701 --- linux-3.0.3/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
40702 +++ linux-3.0.3/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
40703 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
40704
40705 struct sysfs_open_dirent {
40706 atomic_t refcnt;
40707 - atomic_t event;
40708 + atomic_unchecked_t event;
40709 wait_queue_head_t poll;
40710 struct list_head buffers; /* goes through sysfs_buffer.list */
40711 };
40712 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
40713 if (!sysfs_get_active(attr_sd))
40714 return -ENODEV;
40715
40716 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
40717 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
40718 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
40719
40720 sysfs_put_active(attr_sd);
40721 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
40722 return -ENOMEM;
40723
40724 atomic_set(&new_od->refcnt, 0);
40725 - atomic_set(&new_od->event, 1);
40726 + atomic_set_unchecked(&new_od->event, 1);
40727 init_waitqueue_head(&new_od->poll);
40728 INIT_LIST_HEAD(&new_od->buffers);
40729 goto retry;
40730 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
40731
40732 sysfs_put_active(attr_sd);
40733
40734 - if (buffer->event != atomic_read(&od->event))
40735 + if (buffer->event != atomic_read_unchecked(&od->event))
40736 goto trigger;
40737
40738 return DEFAULT_POLLMASK;
40739 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
40740
40741 od = sd->s_attr.open;
40742 if (od) {
40743 - atomic_inc(&od->event);
40744 + atomic_inc_unchecked(&od->event);
40745 wake_up_interruptible(&od->poll);
40746 }
40747
40748 diff -urNp linux-3.0.3/fs/sysfs/mount.c linux-3.0.3/fs/sysfs/mount.c
40749 --- linux-3.0.3/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
40750 +++ linux-3.0.3/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
40751 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
40752 .s_name = "",
40753 .s_count = ATOMIC_INIT(1),
40754 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
40755 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
40756 + .s_mode = S_IFDIR | S_IRWXU,
40757 +#else
40758 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
40759 +#endif
40760 .s_ino = 1,
40761 };
40762
40763 diff -urNp linux-3.0.3/fs/sysfs/symlink.c linux-3.0.3/fs/sysfs/symlink.c
40764 --- linux-3.0.3/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
40765 +++ linux-3.0.3/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
40766 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
40767
40768 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
40769 {
40770 - char *page = nd_get_link(nd);
40771 + const char *page = nd_get_link(nd);
40772 if (!IS_ERR(page))
40773 free_page((unsigned long)page);
40774 }
40775 diff -urNp linux-3.0.3/fs/udf/inode.c linux-3.0.3/fs/udf/inode.c
40776 --- linux-3.0.3/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
40777 +++ linux-3.0.3/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
40778 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
40779 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
40780 int lastblock = 0;
40781
40782 + pax_track_stack();
40783 +
40784 prev_epos.offset = udf_file_entry_alloc_offset(inode);
40785 prev_epos.block = iinfo->i_location;
40786 prev_epos.bh = NULL;
40787 diff -urNp linux-3.0.3/fs/udf/misc.c linux-3.0.3/fs/udf/misc.c
40788 --- linux-3.0.3/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
40789 +++ linux-3.0.3/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
40790 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
40791
40792 u8 udf_tag_checksum(const struct tag *t)
40793 {
40794 - u8 *data = (u8 *)t;
40795 + const u8 *data = (const u8 *)t;
40796 u8 checksum = 0;
40797 int i;
40798 for (i = 0; i < sizeof(struct tag); ++i)
40799 diff -urNp linux-3.0.3/fs/utimes.c linux-3.0.3/fs/utimes.c
40800 --- linux-3.0.3/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
40801 +++ linux-3.0.3/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
40802 @@ -1,6 +1,7 @@
40803 #include <linux/compiler.h>
40804 #include <linux/file.h>
40805 #include <linux/fs.h>
40806 +#include <linux/security.h>
40807 #include <linux/linkage.h>
40808 #include <linux/mount.h>
40809 #include <linux/namei.h>
40810 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
40811 goto mnt_drop_write_and_out;
40812 }
40813 }
40814 +
40815 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
40816 + error = -EACCES;
40817 + goto mnt_drop_write_and_out;
40818 + }
40819 +
40820 mutex_lock(&inode->i_mutex);
40821 error = notify_change(path->dentry, &newattrs);
40822 mutex_unlock(&inode->i_mutex);
40823 diff -urNp linux-3.0.3/fs/xattr_acl.c linux-3.0.3/fs/xattr_acl.c
40824 --- linux-3.0.3/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
40825 +++ linux-3.0.3/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
40826 @@ -17,8 +17,8 @@
40827 struct posix_acl *
40828 posix_acl_from_xattr(const void *value, size_t size)
40829 {
40830 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
40831 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
40832 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
40833 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
40834 int count;
40835 struct posix_acl *acl;
40836 struct posix_acl_entry *acl_e;
40837 diff -urNp linux-3.0.3/fs/xattr.c linux-3.0.3/fs/xattr.c
40838 --- linux-3.0.3/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
40839 +++ linux-3.0.3/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
40840 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
40841 * Extended attribute SET operations
40842 */
40843 static long
40844 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
40845 +setxattr(struct path *path, const char __user *name, const void __user *value,
40846 size_t size, int flags)
40847 {
40848 int error;
40849 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
40850 return PTR_ERR(kvalue);
40851 }
40852
40853 - error = vfs_setxattr(d, kname, kvalue, size, flags);
40854 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
40855 + error = -EACCES;
40856 + goto out;
40857 + }
40858 +
40859 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
40860 +out:
40861 kfree(kvalue);
40862 return error;
40863 }
40864 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
40865 return error;
40866 error = mnt_want_write(path.mnt);
40867 if (!error) {
40868 - error = setxattr(path.dentry, name, value, size, flags);
40869 + error = setxattr(&path, name, value, size, flags);
40870 mnt_drop_write(path.mnt);
40871 }
40872 path_put(&path);
40873 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
40874 return error;
40875 error = mnt_want_write(path.mnt);
40876 if (!error) {
40877 - error = setxattr(path.dentry, name, value, size, flags);
40878 + error = setxattr(&path, name, value, size, flags);
40879 mnt_drop_write(path.mnt);
40880 }
40881 path_put(&path);
40882 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
40883 const void __user *,value, size_t, size, int, flags)
40884 {
40885 struct file *f;
40886 - struct dentry *dentry;
40887 int error = -EBADF;
40888
40889 f = fget(fd);
40890 if (!f)
40891 return error;
40892 - dentry = f->f_path.dentry;
40893 - audit_inode(NULL, dentry);
40894 + audit_inode(NULL, f->f_path.dentry);
40895 error = mnt_want_write_file(f);
40896 if (!error) {
40897 - error = setxattr(dentry, name, value, size, flags);
40898 + error = setxattr(&f->f_path, name, value, size, flags);
40899 mnt_drop_write(f->f_path.mnt);
40900 }
40901 fput(f);
40902 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c
40903 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
40904 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
40905 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
40906 xfs_fsop_geom_t fsgeo;
40907 int error;
40908
40909 + memset(&fsgeo, 0, sizeof(fsgeo));
40910 error = xfs_fs_geometry(mp, &fsgeo, 3);
40911 if (error)
40912 return -error;
40913 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c
40914 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
40915 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
40916 @@ -128,7 +128,7 @@ xfs_find_handle(
40917 }
40918
40919 error = -EFAULT;
40920 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
40921 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
40922 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
40923 goto out_put;
40924
40925 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c
40926 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
40927 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
40928 @@ -437,7 +437,7 @@ xfs_vn_put_link(
40929 struct nameidata *nd,
40930 void *p)
40931 {
40932 - char *s = nd_get_link(nd);
40933 + const char *s = nd_get_link(nd);
40934
40935 if (!IS_ERR(s))
40936 kfree(s);
40937 diff -urNp linux-3.0.3/fs/xfs/xfs_bmap.c linux-3.0.3/fs/xfs/xfs_bmap.c
40938 --- linux-3.0.3/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
40939 +++ linux-3.0.3/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
40940 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
40941 int nmap,
40942 int ret_nmap);
40943 #else
40944 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
40945 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
40946 #endif /* DEBUG */
40947
40948 STATIC int
40949 diff -urNp linux-3.0.3/fs/xfs/xfs_dir2_sf.c linux-3.0.3/fs/xfs/xfs_dir2_sf.c
40950 --- linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
40951 +++ linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
40952 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
40953 }
40954
40955 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
40956 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
40957 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
40958 + char name[sfep->namelen];
40959 + memcpy(name, sfep->name, sfep->namelen);
40960 + if (filldir(dirent, name, sfep->namelen,
40961 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
40962 + *offset = off & 0x7fffffff;
40963 + return 0;
40964 + }
40965 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
40966 off & 0x7fffffff, ino, DT_UNKNOWN)) {
40967 *offset = off & 0x7fffffff;
40968 return 0;
40969 diff -urNp linux-3.0.3/grsecurity/gracl_alloc.c linux-3.0.3/grsecurity/gracl_alloc.c
40970 --- linux-3.0.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
40971 +++ linux-3.0.3/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
40972 @@ -0,0 +1,105 @@
40973 +#include <linux/kernel.h>
40974 +#include <linux/mm.h>
40975 +#include <linux/slab.h>
40976 +#include <linux/vmalloc.h>
40977 +#include <linux/gracl.h>
40978 +#include <linux/grsecurity.h>
40979 +
40980 +static unsigned long alloc_stack_next = 1;
40981 +static unsigned long alloc_stack_size = 1;
40982 +static void **alloc_stack;
40983 +
40984 +static __inline__ int
40985 +alloc_pop(void)
40986 +{
40987 + if (alloc_stack_next == 1)
40988 + return 0;
40989 +
40990 + kfree(alloc_stack[alloc_stack_next - 2]);
40991 +
40992 + alloc_stack_next--;
40993 +
40994 + return 1;
40995 +}
40996 +
40997 +static __inline__ int
40998 +alloc_push(void *buf)
40999 +{
41000 + if (alloc_stack_next >= alloc_stack_size)
41001 + return 1;
41002 +
41003 + alloc_stack[alloc_stack_next - 1] = buf;
41004 +
41005 + alloc_stack_next++;
41006 +
41007 + return 0;
41008 +}
41009 +
41010 +void *
41011 +acl_alloc(unsigned long len)
41012 +{
41013 + void *ret = NULL;
41014 +
41015 + if (!len || len > PAGE_SIZE)
41016 + goto out;
41017 +
41018 + ret = kmalloc(len, GFP_KERNEL);
41019 +
41020 + if (ret) {
41021 + if (alloc_push(ret)) {
41022 + kfree(ret);
41023 + ret = NULL;
41024 + }
41025 + }
41026 +
41027 +out:
41028 + return ret;
41029 +}
41030 +
41031 +void *
41032 +acl_alloc_num(unsigned long num, unsigned long len)
41033 +{
41034 + if (!len || (num > (PAGE_SIZE / len)))
41035 + return NULL;
41036 +
41037 + return acl_alloc(num * len);
41038 +}
41039 +
41040 +void
41041 +acl_free_all(void)
41042 +{
41043 + if (gr_acl_is_enabled() || !alloc_stack)
41044 + return;
41045 +
41046 + while (alloc_pop()) ;
41047 +
41048 + if (alloc_stack) {
41049 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41050 + kfree(alloc_stack);
41051 + else
41052 + vfree(alloc_stack);
41053 + }
41054 +
41055 + alloc_stack = NULL;
41056 + alloc_stack_size = 1;
41057 + alloc_stack_next = 1;
41058 +
41059 + return;
41060 +}
41061 +
41062 +int
41063 +acl_alloc_stack_init(unsigned long size)
41064 +{
41065 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41066 + alloc_stack =
41067 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41068 + else
41069 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41070 +
41071 + alloc_stack_size = size;
41072 +
41073 + if (!alloc_stack)
41074 + return 0;
41075 + else
41076 + return 1;
41077 +}
41078 diff -urNp linux-3.0.3/grsecurity/gracl.c linux-3.0.3/grsecurity/gracl.c
41079 --- linux-3.0.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41080 +++ linux-3.0.3/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41081 @@ -0,0 +1,4106 @@
41082 +#include <linux/kernel.h>
41083 +#include <linux/module.h>
41084 +#include <linux/sched.h>
41085 +#include <linux/mm.h>
41086 +#include <linux/file.h>
41087 +#include <linux/fs.h>
41088 +#include <linux/namei.h>
41089 +#include <linux/mount.h>
41090 +#include <linux/tty.h>
41091 +#include <linux/proc_fs.h>
41092 +#include <linux/lglock.h>
41093 +#include <linux/slab.h>
41094 +#include <linux/vmalloc.h>
41095 +#include <linux/types.h>
41096 +#include <linux/sysctl.h>
41097 +#include <linux/netdevice.h>
41098 +#include <linux/ptrace.h>
41099 +#include <linux/gracl.h>
41100 +#include <linux/gralloc.h>
41101 +#include <linux/grsecurity.h>
41102 +#include <linux/grinternal.h>
41103 +#include <linux/pid_namespace.h>
41104 +#include <linux/fdtable.h>
41105 +#include <linux/percpu.h>
41106 +
41107 +#include <asm/uaccess.h>
41108 +#include <asm/errno.h>
41109 +#include <asm/mman.h>
41110 +
41111 +static struct acl_role_db acl_role_set;
41112 +static struct name_db name_set;
41113 +static struct inodev_db inodev_set;
41114 +
41115 +/* for keeping track of userspace pointers used for subjects, so we
41116 + can share references in the kernel as well
41117 +*/
41118 +
41119 +static struct path real_root;
41120 +
41121 +static struct acl_subj_map_db subj_map_set;
41122 +
41123 +static struct acl_role_label *default_role;
41124 +
41125 +static struct acl_role_label *role_list;
41126 +
41127 +static u16 acl_sp_role_value;
41128 +
41129 +extern char *gr_shared_page[4];
41130 +static DEFINE_MUTEX(gr_dev_mutex);
41131 +DEFINE_RWLOCK(gr_inode_lock);
41132 +
41133 +struct gr_arg *gr_usermode;
41134 +
41135 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41136 +
41137 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41138 +extern void gr_clear_learn_entries(void);
41139 +
41140 +#ifdef CONFIG_GRKERNSEC_RESLOG
41141 +extern void gr_log_resource(const struct task_struct *task,
41142 + const int res, const unsigned long wanted, const int gt);
41143 +#endif
41144 +
41145 +unsigned char *gr_system_salt;
41146 +unsigned char *gr_system_sum;
41147 +
41148 +static struct sprole_pw **acl_special_roles = NULL;
41149 +static __u16 num_sprole_pws = 0;
41150 +
41151 +static struct acl_role_label *kernel_role = NULL;
41152 +
41153 +static unsigned int gr_auth_attempts = 0;
41154 +static unsigned long gr_auth_expires = 0UL;
41155 +
41156 +#ifdef CONFIG_NET
41157 +extern struct vfsmount *sock_mnt;
41158 +#endif
41159 +
41160 +extern struct vfsmount *pipe_mnt;
41161 +extern struct vfsmount *shm_mnt;
41162 +#ifdef CONFIG_HUGETLBFS
41163 +extern struct vfsmount *hugetlbfs_vfsmount;
41164 +#endif
41165 +
41166 +static struct acl_object_label *fakefs_obj_rw;
41167 +static struct acl_object_label *fakefs_obj_rwx;
41168 +
41169 +extern int gr_init_uidset(void);
41170 +extern void gr_free_uidset(void);
41171 +extern void gr_remove_uid(uid_t uid);
41172 +extern int gr_find_uid(uid_t uid);
41173 +
41174 +DECLARE_BRLOCK(vfsmount_lock);
41175 +
41176 +__inline__ int
41177 +gr_acl_is_enabled(void)
41178 +{
41179 + return (gr_status & GR_READY);
41180 +}
41181 +
41182 +#ifdef CONFIG_BTRFS_FS
41183 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41184 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41185 +#endif
41186 +
41187 +static inline dev_t __get_dev(const struct dentry *dentry)
41188 +{
41189 +#ifdef CONFIG_BTRFS_FS
41190 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41191 + return get_btrfs_dev_from_inode(dentry->d_inode);
41192 + else
41193 +#endif
41194 + return dentry->d_inode->i_sb->s_dev;
41195 +}
41196 +
41197 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41198 +{
41199 + return __get_dev(dentry);
41200 +}
41201 +
41202 +static char gr_task_roletype_to_char(struct task_struct *task)
41203 +{
41204 + switch (task->role->roletype &
41205 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41206 + GR_ROLE_SPECIAL)) {
41207 + case GR_ROLE_DEFAULT:
41208 + return 'D';
41209 + case GR_ROLE_USER:
41210 + return 'U';
41211 + case GR_ROLE_GROUP:
41212 + return 'G';
41213 + case GR_ROLE_SPECIAL:
41214 + return 'S';
41215 + }
41216 +
41217 + return 'X';
41218 +}
41219 +
41220 +char gr_roletype_to_char(void)
41221 +{
41222 + return gr_task_roletype_to_char(current);
41223 +}
41224 +
41225 +__inline__ int
41226 +gr_acl_tpe_check(void)
41227 +{
41228 + if (unlikely(!(gr_status & GR_READY)))
41229 + return 0;
41230 + if (current->role->roletype & GR_ROLE_TPE)
41231 + return 1;
41232 + else
41233 + return 0;
41234 +}
41235 +
41236 +int
41237 +gr_handle_rawio(const struct inode *inode)
41238 +{
41239 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41240 + if (inode && S_ISBLK(inode->i_mode) &&
41241 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41242 + !capable(CAP_SYS_RAWIO))
41243 + return 1;
41244 +#endif
41245 + return 0;
41246 +}
41247 +
41248 +static int
41249 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41250 +{
41251 + if (likely(lena != lenb))
41252 + return 0;
41253 +
41254 + return !memcmp(a, b, lena);
41255 +}
41256 +
41257 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41258 +{
41259 + *buflen -= namelen;
41260 + if (*buflen < 0)
41261 + return -ENAMETOOLONG;
41262 + *buffer -= namelen;
41263 + memcpy(*buffer, str, namelen);
41264 + return 0;
41265 +}
41266 +
41267 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41268 +{
41269 + return prepend(buffer, buflen, name->name, name->len);
41270 +}
41271 +
41272 +static int prepend_path(const struct path *path, struct path *root,
41273 + char **buffer, int *buflen)
41274 +{
41275 + struct dentry *dentry = path->dentry;
41276 + struct vfsmount *vfsmnt = path->mnt;
41277 + bool slash = false;
41278 + int error = 0;
41279 +
41280 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41281 + struct dentry * parent;
41282 +
41283 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41284 + /* Global root? */
41285 + if (vfsmnt->mnt_parent == vfsmnt) {
41286 + goto out;
41287 + }
41288 + dentry = vfsmnt->mnt_mountpoint;
41289 + vfsmnt = vfsmnt->mnt_parent;
41290 + continue;
41291 + }
41292 + parent = dentry->d_parent;
41293 + prefetch(parent);
41294 + spin_lock(&dentry->d_lock);
41295 + error = prepend_name(buffer, buflen, &dentry->d_name);
41296 + spin_unlock(&dentry->d_lock);
41297 + if (!error)
41298 + error = prepend(buffer, buflen, "/", 1);
41299 + if (error)
41300 + break;
41301 +
41302 + slash = true;
41303 + dentry = parent;
41304 + }
41305 +
41306 +out:
41307 + if (!error && !slash)
41308 + error = prepend(buffer, buflen, "/", 1);
41309 +
41310 + return error;
41311 +}
41312 +
41313 +/* this must be called with vfsmount_lock and rename_lock held */
41314 +
41315 +static char *__our_d_path(const struct path *path, struct path *root,
41316 + char *buf, int buflen)
41317 +{
41318 + char *res = buf + buflen;
41319 + int error;
41320 +
41321 + prepend(&res, &buflen, "\0", 1);
41322 + error = prepend_path(path, root, &res, &buflen);
41323 + if (error)
41324 + return ERR_PTR(error);
41325 +
41326 + return res;
41327 +}
41328 +
41329 +static char *
41330 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41331 +{
41332 + char *retval;
41333 +
41334 + retval = __our_d_path(path, root, buf, buflen);
41335 + if (unlikely(IS_ERR(retval)))
41336 + retval = strcpy(buf, "<path too long>");
41337 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41338 + retval[1] = '\0';
41339 +
41340 + return retval;
41341 +}
41342 +
41343 +static char *
41344 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41345 + char *buf, int buflen)
41346 +{
41347 + struct path path;
41348 + char *res;
41349 +
41350 + path.dentry = (struct dentry *)dentry;
41351 + path.mnt = (struct vfsmount *)vfsmnt;
41352 +
41353 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41354 + by the RBAC system */
41355 + res = gen_full_path(&path, &real_root, buf, buflen);
41356 +
41357 + return res;
41358 +}
41359 +
41360 +static char *
41361 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41362 + char *buf, int buflen)
41363 +{
41364 + char *res;
41365 + struct path path;
41366 + struct path root;
41367 + struct task_struct *reaper = &init_task;
41368 +
41369 + path.dentry = (struct dentry *)dentry;
41370 + path.mnt = (struct vfsmount *)vfsmnt;
41371 +
41372 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41373 + get_fs_root(reaper->fs, &root);
41374 +
41375 + write_seqlock(&rename_lock);
41376 + br_read_lock(vfsmount_lock);
41377 + res = gen_full_path(&path, &root, buf, buflen);
41378 + br_read_unlock(vfsmount_lock);
41379 + write_sequnlock(&rename_lock);
41380 +
41381 + path_put(&root);
41382 + return res;
41383 +}
41384 +
41385 +static char *
41386 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41387 +{
41388 + char *ret;
41389 + write_seqlock(&rename_lock);
41390 + br_read_lock(vfsmount_lock);
41391 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41392 + PAGE_SIZE);
41393 + br_read_unlock(vfsmount_lock);
41394 + write_sequnlock(&rename_lock);
41395 + return ret;
41396 +}
41397 +
41398 +char *
41399 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41400 +{
41401 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41402 + PAGE_SIZE);
41403 +}
41404 +
41405 +char *
41406 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41407 +{
41408 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41409 + PAGE_SIZE);
41410 +}
41411 +
41412 +char *
41413 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41414 +{
41415 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41416 + PAGE_SIZE);
41417 +}
41418 +
41419 +char *
41420 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41421 +{
41422 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41423 + PAGE_SIZE);
41424 +}
41425 +
41426 +char *
41427 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41428 +{
41429 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41430 + PAGE_SIZE);
41431 +}
41432 +
41433 +__inline__ __u32
41434 +to_gr_audit(const __u32 reqmode)
41435 +{
41436 + /* masks off auditable permission flags, then shifts them to create
41437 + auditing flags, and adds the special case of append auditing if
41438 + we're requesting write */
41439 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41440 +}
41441 +
41442 +struct acl_subject_label *
41443 +lookup_subject_map(const struct acl_subject_label *userp)
41444 +{
41445 + unsigned int index = shash(userp, subj_map_set.s_size);
41446 + struct subject_map *match;
41447 +
41448 + match = subj_map_set.s_hash[index];
41449 +
41450 + while (match && match->user != userp)
41451 + match = match->next;
41452 +
41453 + if (match != NULL)
41454 + return match->kernel;
41455 + else
41456 + return NULL;
41457 +}
41458 +
41459 +static void
41460 +insert_subj_map_entry(struct subject_map *subjmap)
41461 +{
41462 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41463 + struct subject_map **curr;
41464 +
41465 + subjmap->prev = NULL;
41466 +
41467 + curr = &subj_map_set.s_hash[index];
41468 + if (*curr != NULL)
41469 + (*curr)->prev = subjmap;
41470 +
41471 + subjmap->next = *curr;
41472 + *curr = subjmap;
41473 +
41474 + return;
41475 +}
41476 +
41477 +static struct acl_role_label *
41478 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41479 + const gid_t gid)
41480 +{
41481 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41482 + struct acl_role_label *match;
41483 + struct role_allowed_ip *ipp;
41484 + unsigned int x;
41485 + u32 curr_ip = task->signal->curr_ip;
41486 +
41487 + task->signal->saved_ip = curr_ip;
41488 +
41489 + match = acl_role_set.r_hash[index];
41490 +
41491 + while (match) {
41492 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41493 + for (x = 0; x < match->domain_child_num; x++) {
41494 + if (match->domain_children[x] == uid)
41495 + goto found;
41496 + }
41497 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41498 + break;
41499 + match = match->next;
41500 + }
41501 +found:
41502 + if (match == NULL) {
41503 + try_group:
41504 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41505 + match = acl_role_set.r_hash[index];
41506 +
41507 + while (match) {
41508 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41509 + for (x = 0; x < match->domain_child_num; x++) {
41510 + if (match->domain_children[x] == gid)
41511 + goto found2;
41512 + }
41513 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41514 + break;
41515 + match = match->next;
41516 + }
41517 +found2:
41518 + if (match == NULL)
41519 + match = default_role;
41520 + if (match->allowed_ips == NULL)
41521 + return match;
41522 + else {
41523 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41524 + if (likely
41525 + ((ntohl(curr_ip) & ipp->netmask) ==
41526 + (ntohl(ipp->addr) & ipp->netmask)))
41527 + return match;
41528 + }
41529 + match = default_role;
41530 + }
41531 + } else if (match->allowed_ips == NULL) {
41532 + return match;
41533 + } else {
41534 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41535 + if (likely
41536 + ((ntohl(curr_ip) & ipp->netmask) ==
41537 + (ntohl(ipp->addr) & ipp->netmask)))
41538 + return match;
41539 + }
41540 + goto try_group;
41541 + }
41542 +
41543 + return match;
41544 +}
41545 +
41546 +struct acl_subject_label *
41547 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41548 + const struct acl_role_label *role)
41549 +{
41550 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41551 + struct acl_subject_label *match;
41552 +
41553 + match = role->subj_hash[index];
41554 +
41555 + while (match && (match->inode != ino || match->device != dev ||
41556 + (match->mode & GR_DELETED))) {
41557 + match = match->next;
41558 + }
41559 +
41560 + if (match && !(match->mode & GR_DELETED))
41561 + return match;
41562 + else
41563 + return NULL;
41564 +}
41565 +
41566 +struct acl_subject_label *
41567 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41568 + const struct acl_role_label *role)
41569 +{
41570 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41571 + struct acl_subject_label *match;
41572 +
41573 + match = role->subj_hash[index];
41574 +
41575 + while (match && (match->inode != ino || match->device != dev ||
41576 + !(match->mode & GR_DELETED))) {
41577 + match = match->next;
41578 + }
41579 +
41580 + if (match && (match->mode & GR_DELETED))
41581 + return match;
41582 + else
41583 + return NULL;
41584 +}
41585 +
41586 +static struct acl_object_label *
41587 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41588 + const struct acl_subject_label *subj)
41589 +{
41590 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41591 + struct acl_object_label *match;
41592 +
41593 + match = subj->obj_hash[index];
41594 +
41595 + while (match && (match->inode != ino || match->device != dev ||
41596 + (match->mode & GR_DELETED))) {
41597 + match = match->next;
41598 + }
41599 +
41600 + if (match && !(match->mode & GR_DELETED))
41601 + return match;
41602 + else
41603 + return NULL;
41604 +}
41605 +
41606 +static struct acl_object_label *
41607 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41608 + const struct acl_subject_label *subj)
41609 +{
41610 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41611 + struct acl_object_label *match;
41612 +
41613 + match = subj->obj_hash[index];
41614 +
41615 + while (match && (match->inode != ino || match->device != dev ||
41616 + !(match->mode & GR_DELETED))) {
41617 + match = match->next;
41618 + }
41619 +
41620 + if (match && (match->mode & GR_DELETED))
41621 + return match;
41622 +
41623 + match = subj->obj_hash[index];
41624 +
41625 + while (match && (match->inode != ino || match->device != dev ||
41626 + (match->mode & GR_DELETED))) {
41627 + match = match->next;
41628 + }
41629 +
41630 + if (match && !(match->mode & GR_DELETED))
41631 + return match;
41632 + else
41633 + return NULL;
41634 +}
41635 +
41636 +static struct name_entry *
41637 +lookup_name_entry(const char *name)
41638 +{
41639 + unsigned int len = strlen(name);
41640 + unsigned int key = full_name_hash(name, len);
41641 + unsigned int index = key % name_set.n_size;
41642 + struct name_entry *match;
41643 +
41644 + match = name_set.n_hash[index];
41645 +
41646 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41647 + match = match->next;
41648 +
41649 + return match;
41650 +}
41651 +
41652 +static struct name_entry *
41653 +lookup_name_entry_create(const char *name)
41654 +{
41655 + unsigned int len = strlen(name);
41656 + unsigned int key = full_name_hash(name, len);
41657 + unsigned int index = key % name_set.n_size;
41658 + struct name_entry *match;
41659 +
41660 + match = name_set.n_hash[index];
41661 +
41662 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41663 + !match->deleted))
41664 + match = match->next;
41665 +
41666 + if (match && match->deleted)
41667 + return match;
41668 +
41669 + match = name_set.n_hash[index];
41670 +
41671 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41672 + match->deleted))
41673 + match = match->next;
41674 +
41675 + if (match && !match->deleted)
41676 + return match;
41677 + else
41678 + return NULL;
41679 +}
41680 +
41681 +static struct inodev_entry *
41682 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
41683 +{
41684 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
41685 + struct inodev_entry *match;
41686 +
41687 + match = inodev_set.i_hash[index];
41688 +
41689 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
41690 + match = match->next;
41691 +
41692 + return match;
41693 +}
41694 +
41695 +static void
41696 +insert_inodev_entry(struct inodev_entry *entry)
41697 +{
41698 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
41699 + inodev_set.i_size);
41700 + struct inodev_entry **curr;
41701 +
41702 + entry->prev = NULL;
41703 +
41704 + curr = &inodev_set.i_hash[index];
41705 + if (*curr != NULL)
41706 + (*curr)->prev = entry;
41707 +
41708 + entry->next = *curr;
41709 + *curr = entry;
41710 +
41711 + return;
41712 +}
41713 +
41714 +static void
41715 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
41716 +{
41717 + unsigned int index =
41718 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
41719 + struct acl_role_label **curr;
41720 + struct acl_role_label *tmp;
41721 +
41722 + curr = &acl_role_set.r_hash[index];
41723 +
41724 + /* if role was already inserted due to domains and already has
41725 + a role in the same bucket as it attached, then we need to
41726 + combine these two buckets
41727 + */
41728 + if (role->next) {
41729 + tmp = role->next;
41730 + while (tmp->next)
41731 + tmp = tmp->next;
41732 + tmp->next = *curr;
41733 + } else
41734 + role->next = *curr;
41735 + *curr = role;
41736 +
41737 + return;
41738 +}
41739 +
41740 +static void
41741 +insert_acl_role_label(struct acl_role_label *role)
41742 +{
41743 + int i;
41744 +
41745 + if (role_list == NULL) {
41746 + role_list = role;
41747 + role->prev = NULL;
41748 + } else {
41749 + role->prev = role_list;
41750 + role_list = role;
41751 + }
41752 +
41753 + /* used for hash chains */
41754 + role->next = NULL;
41755 +
41756 + if (role->roletype & GR_ROLE_DOMAIN) {
41757 + for (i = 0; i < role->domain_child_num; i++)
41758 + __insert_acl_role_label(role, role->domain_children[i]);
41759 + } else
41760 + __insert_acl_role_label(role, role->uidgid);
41761 +}
41762 +
41763 +static int
41764 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
41765 +{
41766 + struct name_entry **curr, *nentry;
41767 + struct inodev_entry *ientry;
41768 + unsigned int len = strlen(name);
41769 + unsigned int key = full_name_hash(name, len);
41770 + unsigned int index = key % name_set.n_size;
41771 +
41772 + curr = &name_set.n_hash[index];
41773 +
41774 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
41775 + curr = &((*curr)->next);
41776 +
41777 + if (*curr != NULL)
41778 + return 1;
41779 +
41780 + nentry = acl_alloc(sizeof (struct name_entry));
41781 + if (nentry == NULL)
41782 + return 0;
41783 + ientry = acl_alloc(sizeof (struct inodev_entry));
41784 + if (ientry == NULL)
41785 + return 0;
41786 + ientry->nentry = nentry;
41787 +
41788 + nentry->key = key;
41789 + nentry->name = name;
41790 + nentry->inode = inode;
41791 + nentry->device = device;
41792 + nentry->len = len;
41793 + nentry->deleted = deleted;
41794 +
41795 + nentry->prev = NULL;
41796 + curr = &name_set.n_hash[index];
41797 + if (*curr != NULL)
41798 + (*curr)->prev = nentry;
41799 + nentry->next = *curr;
41800 + *curr = nentry;
41801 +
41802 + /* insert us into the table searchable by inode/dev */
41803 + insert_inodev_entry(ientry);
41804 +
41805 + return 1;
41806 +}
41807 +
41808 +static void
41809 +insert_acl_obj_label(struct acl_object_label *obj,
41810 + struct acl_subject_label *subj)
41811 +{
41812 + unsigned int index =
41813 + fhash(obj->inode, obj->device, subj->obj_hash_size);
41814 + struct acl_object_label **curr;
41815 +
41816 +
41817 + obj->prev = NULL;
41818 +
41819 + curr = &subj->obj_hash[index];
41820 + if (*curr != NULL)
41821 + (*curr)->prev = obj;
41822 +
41823 + obj->next = *curr;
41824 + *curr = obj;
41825 +
41826 + return;
41827 +}
41828 +
41829 +static void
41830 +insert_acl_subj_label(struct acl_subject_label *obj,
41831 + struct acl_role_label *role)
41832 +{
41833 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
41834 + struct acl_subject_label **curr;
41835 +
41836 + obj->prev = NULL;
41837 +
41838 + curr = &role->subj_hash[index];
41839 + if (*curr != NULL)
41840 + (*curr)->prev = obj;
41841 +
41842 + obj->next = *curr;
41843 + *curr = obj;
41844 +
41845 + return;
41846 +}
41847 +
41848 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
41849 +
41850 +static void *
41851 +create_table(__u32 * len, int elementsize)
41852 +{
41853 + unsigned int table_sizes[] = {
41854 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
41855 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
41856 + 4194301, 8388593, 16777213, 33554393, 67108859
41857 + };
41858 + void *newtable = NULL;
41859 + unsigned int pwr = 0;
41860 +
41861 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
41862 + table_sizes[pwr] <= *len)
41863 + pwr++;
41864 +
41865 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
41866 + return newtable;
41867 +
41868 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
41869 + newtable =
41870 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
41871 + else
41872 + newtable = vmalloc(table_sizes[pwr] * elementsize);
41873 +
41874 + *len = table_sizes[pwr];
41875 +
41876 + return newtable;
41877 +}
41878 +
41879 +static int
41880 +init_variables(const struct gr_arg *arg)
41881 +{
41882 + struct task_struct *reaper = &init_task;
41883 + unsigned int stacksize;
41884 +
41885 + subj_map_set.s_size = arg->role_db.num_subjects;
41886 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
41887 + name_set.n_size = arg->role_db.num_objects;
41888 + inodev_set.i_size = arg->role_db.num_objects;
41889 +
41890 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
41891 + !name_set.n_size || !inodev_set.i_size)
41892 + return 1;
41893 +
41894 + if (!gr_init_uidset())
41895 + return 1;
41896 +
41897 + /* set up the stack that holds allocation info */
41898 +
41899 + stacksize = arg->role_db.num_pointers + 5;
41900 +
41901 + if (!acl_alloc_stack_init(stacksize))
41902 + return 1;
41903 +
41904 + /* grab reference for the real root dentry and vfsmount */
41905 + get_fs_root(reaper->fs, &real_root);
41906 +
41907 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
41908 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
41909 +#endif
41910 +
41911 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
41912 + if (fakefs_obj_rw == NULL)
41913 + return 1;
41914 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
41915 +
41916 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
41917 + if (fakefs_obj_rwx == NULL)
41918 + return 1;
41919 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
41920 +
41921 + subj_map_set.s_hash =
41922 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
41923 + acl_role_set.r_hash =
41924 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
41925 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
41926 + inodev_set.i_hash =
41927 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
41928 +
41929 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
41930 + !name_set.n_hash || !inodev_set.i_hash)
41931 + return 1;
41932 +
41933 + memset(subj_map_set.s_hash, 0,
41934 + sizeof(struct subject_map *) * subj_map_set.s_size);
41935 + memset(acl_role_set.r_hash, 0,
41936 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
41937 + memset(name_set.n_hash, 0,
41938 + sizeof (struct name_entry *) * name_set.n_size);
41939 + memset(inodev_set.i_hash, 0,
41940 + sizeof (struct inodev_entry *) * inodev_set.i_size);
41941 +
41942 + return 0;
41943 +}
41944 +
41945 +/* free information not needed after startup
41946 + currently contains user->kernel pointer mappings for subjects
41947 +*/
41948 +
41949 +static void
41950 +free_init_variables(void)
41951 +{
41952 + __u32 i;
41953 +
41954 + if (subj_map_set.s_hash) {
41955 + for (i = 0; i < subj_map_set.s_size; i++) {
41956 + if (subj_map_set.s_hash[i]) {
41957 + kfree(subj_map_set.s_hash[i]);
41958 + subj_map_set.s_hash[i] = NULL;
41959 + }
41960 + }
41961 +
41962 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
41963 + PAGE_SIZE)
41964 + kfree(subj_map_set.s_hash);
41965 + else
41966 + vfree(subj_map_set.s_hash);
41967 + }
41968 +
41969 + return;
41970 +}
41971 +
41972 +static void
41973 +free_variables(void)
41974 +{
41975 + struct acl_subject_label *s;
41976 + struct acl_role_label *r;
41977 + struct task_struct *task, *task2;
41978 + unsigned int x;
41979 +
41980 + gr_clear_learn_entries();
41981 +
41982 + read_lock(&tasklist_lock);
41983 + do_each_thread(task2, task) {
41984 + task->acl_sp_role = 0;
41985 + task->acl_role_id = 0;
41986 + task->acl = NULL;
41987 + task->role = NULL;
41988 + } while_each_thread(task2, task);
41989 + read_unlock(&tasklist_lock);
41990 +
41991 + /* release the reference to the real root dentry and vfsmount */
41992 + path_put(&real_root);
41993 +
41994 + /* free all object hash tables */
41995 +
41996 + FOR_EACH_ROLE_START(r)
41997 + if (r->subj_hash == NULL)
41998 + goto next_role;
41999 + FOR_EACH_SUBJECT_START(r, s, x)
42000 + if (s->obj_hash == NULL)
42001 + break;
42002 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42003 + kfree(s->obj_hash);
42004 + else
42005 + vfree(s->obj_hash);
42006 + FOR_EACH_SUBJECT_END(s, x)
42007 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42008 + if (s->obj_hash == NULL)
42009 + break;
42010 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42011 + kfree(s->obj_hash);
42012 + else
42013 + vfree(s->obj_hash);
42014 + FOR_EACH_NESTED_SUBJECT_END(s)
42015 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42016 + kfree(r->subj_hash);
42017 + else
42018 + vfree(r->subj_hash);
42019 + r->subj_hash = NULL;
42020 +next_role:
42021 + FOR_EACH_ROLE_END(r)
42022 +
42023 + acl_free_all();
42024 +
42025 + if (acl_role_set.r_hash) {
42026 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42027 + PAGE_SIZE)
42028 + kfree(acl_role_set.r_hash);
42029 + else
42030 + vfree(acl_role_set.r_hash);
42031 + }
42032 + if (name_set.n_hash) {
42033 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42034 + PAGE_SIZE)
42035 + kfree(name_set.n_hash);
42036 + else
42037 + vfree(name_set.n_hash);
42038 + }
42039 +
42040 + if (inodev_set.i_hash) {
42041 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42042 + PAGE_SIZE)
42043 + kfree(inodev_set.i_hash);
42044 + else
42045 + vfree(inodev_set.i_hash);
42046 + }
42047 +
42048 + gr_free_uidset();
42049 +
42050 + memset(&name_set, 0, sizeof (struct name_db));
42051 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42052 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42053 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42054 +
42055 + default_role = NULL;
42056 + role_list = NULL;
42057 +
42058 + return;
42059 +}
42060 +
42061 +static __u32
42062 +count_user_objs(struct acl_object_label *userp)
42063 +{
42064 + struct acl_object_label o_tmp;
42065 + __u32 num = 0;
42066 +
42067 + while (userp) {
42068 + if (copy_from_user(&o_tmp, userp,
42069 + sizeof (struct acl_object_label)))
42070 + break;
42071 +
42072 + userp = o_tmp.prev;
42073 + num++;
42074 + }
42075 +
42076 + return num;
42077 +}
42078 +
42079 +static struct acl_subject_label *
42080 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42081 +
42082 +static int
42083 +copy_user_glob(struct acl_object_label *obj)
42084 +{
42085 + struct acl_object_label *g_tmp, **guser;
42086 + unsigned int len;
42087 + char *tmp;
42088 +
42089 + if (obj->globbed == NULL)
42090 + return 0;
42091 +
42092 + guser = &obj->globbed;
42093 + while (*guser) {
42094 + g_tmp = (struct acl_object_label *)
42095 + acl_alloc(sizeof (struct acl_object_label));
42096 + if (g_tmp == NULL)
42097 + return -ENOMEM;
42098 +
42099 + if (copy_from_user(g_tmp, *guser,
42100 + sizeof (struct acl_object_label)))
42101 + return -EFAULT;
42102 +
42103 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42104 +
42105 + if (!len || len >= PATH_MAX)
42106 + return -EINVAL;
42107 +
42108 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42109 + return -ENOMEM;
42110 +
42111 + if (copy_from_user(tmp, g_tmp->filename, len))
42112 + return -EFAULT;
42113 + tmp[len-1] = '\0';
42114 + g_tmp->filename = tmp;
42115 +
42116 + *guser = g_tmp;
42117 + guser = &(g_tmp->next);
42118 + }
42119 +
42120 + return 0;
42121 +}
42122 +
42123 +static int
42124 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42125 + struct acl_role_label *role)
42126 +{
42127 + struct acl_object_label *o_tmp;
42128 + unsigned int len;
42129 + int ret;
42130 + char *tmp;
42131 +
42132 + while (userp) {
42133 + if ((o_tmp = (struct acl_object_label *)
42134 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42135 + return -ENOMEM;
42136 +
42137 + if (copy_from_user(o_tmp, userp,
42138 + sizeof (struct acl_object_label)))
42139 + return -EFAULT;
42140 +
42141 + userp = o_tmp->prev;
42142 +
42143 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42144 +
42145 + if (!len || len >= PATH_MAX)
42146 + return -EINVAL;
42147 +
42148 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42149 + return -ENOMEM;
42150 +
42151 + if (copy_from_user(tmp, o_tmp->filename, len))
42152 + return -EFAULT;
42153 + tmp[len-1] = '\0';
42154 + o_tmp->filename = tmp;
42155 +
42156 + insert_acl_obj_label(o_tmp, subj);
42157 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42158 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42159 + return -ENOMEM;
42160 +
42161 + ret = copy_user_glob(o_tmp);
42162 + if (ret)
42163 + return ret;
42164 +
42165 + if (o_tmp->nested) {
42166 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42167 + if (IS_ERR(o_tmp->nested))
42168 + return PTR_ERR(o_tmp->nested);
42169 +
42170 + /* insert into nested subject list */
42171 + o_tmp->nested->next = role->hash->first;
42172 + role->hash->first = o_tmp->nested;
42173 + }
42174 + }
42175 +
42176 + return 0;
42177 +}
42178 +
42179 +static __u32
42180 +count_user_subjs(struct acl_subject_label *userp)
42181 +{
42182 + struct acl_subject_label s_tmp;
42183 + __u32 num = 0;
42184 +
42185 + while (userp) {
42186 + if (copy_from_user(&s_tmp, userp,
42187 + sizeof (struct acl_subject_label)))
42188 + break;
42189 +
42190 + userp = s_tmp.prev;
42191 + /* do not count nested subjects against this count, since
42192 + they are not included in the hash table, but are
42193 + attached to objects. We have already counted
42194 + the subjects in userspace for the allocation
42195 + stack
42196 + */
42197 + if (!(s_tmp.mode & GR_NESTED))
42198 + num++;
42199 + }
42200 +
42201 + return num;
42202 +}
42203 +
42204 +static int
42205 +copy_user_allowedips(struct acl_role_label *rolep)
42206 +{
42207 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42208 +
42209 + ruserip = rolep->allowed_ips;
42210 +
42211 + while (ruserip) {
42212 + rlast = rtmp;
42213 +
42214 + if ((rtmp = (struct role_allowed_ip *)
42215 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42216 + return -ENOMEM;
42217 +
42218 + if (copy_from_user(rtmp, ruserip,
42219 + sizeof (struct role_allowed_ip)))
42220 + return -EFAULT;
42221 +
42222 + ruserip = rtmp->prev;
42223 +
42224 + if (!rlast) {
42225 + rtmp->prev = NULL;
42226 + rolep->allowed_ips = rtmp;
42227 + } else {
42228 + rlast->next = rtmp;
42229 + rtmp->prev = rlast;
42230 + }
42231 +
42232 + if (!ruserip)
42233 + rtmp->next = NULL;
42234 + }
42235 +
42236 + return 0;
42237 +}
42238 +
42239 +static int
42240 +copy_user_transitions(struct acl_role_label *rolep)
42241 +{
42242 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42243 +
42244 + unsigned int len;
42245 + char *tmp;
42246 +
42247 + rusertp = rolep->transitions;
42248 +
42249 + while (rusertp) {
42250 + rlast = rtmp;
42251 +
42252 + if ((rtmp = (struct role_transition *)
42253 + acl_alloc(sizeof (struct role_transition))) == NULL)
42254 + return -ENOMEM;
42255 +
42256 + if (copy_from_user(rtmp, rusertp,
42257 + sizeof (struct role_transition)))
42258 + return -EFAULT;
42259 +
42260 + rusertp = rtmp->prev;
42261 +
42262 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42263 +
42264 + if (!len || len >= GR_SPROLE_LEN)
42265 + return -EINVAL;
42266 +
42267 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42268 + return -ENOMEM;
42269 +
42270 + if (copy_from_user(tmp, rtmp->rolename, len))
42271 + return -EFAULT;
42272 + tmp[len-1] = '\0';
42273 + rtmp->rolename = tmp;
42274 +
42275 + if (!rlast) {
42276 + rtmp->prev = NULL;
42277 + rolep->transitions = rtmp;
42278 + } else {
42279 + rlast->next = rtmp;
42280 + rtmp->prev = rlast;
42281 + }
42282 +
42283 + if (!rusertp)
42284 + rtmp->next = NULL;
42285 + }
42286 +
42287 + return 0;
42288 +}
42289 +
42290 +static struct acl_subject_label *
42291 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42292 +{
42293 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42294 + unsigned int len;
42295 + char *tmp;
42296 + __u32 num_objs;
42297 + struct acl_ip_label **i_tmp, *i_utmp2;
42298 + struct gr_hash_struct ghash;
42299 + struct subject_map *subjmap;
42300 + unsigned int i_num;
42301 + int err;
42302 +
42303 + s_tmp = lookup_subject_map(userp);
42304 +
42305 + /* we've already copied this subject into the kernel, just return
42306 + the reference to it, and don't copy it over again
42307 + */
42308 + if (s_tmp)
42309 + return(s_tmp);
42310 +
42311 + if ((s_tmp = (struct acl_subject_label *)
42312 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42313 + return ERR_PTR(-ENOMEM);
42314 +
42315 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42316 + if (subjmap == NULL)
42317 + return ERR_PTR(-ENOMEM);
42318 +
42319 + subjmap->user = userp;
42320 + subjmap->kernel = s_tmp;
42321 + insert_subj_map_entry(subjmap);
42322 +
42323 + if (copy_from_user(s_tmp, userp,
42324 + sizeof (struct acl_subject_label)))
42325 + return ERR_PTR(-EFAULT);
42326 +
42327 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42328 +
42329 + if (!len || len >= PATH_MAX)
42330 + return ERR_PTR(-EINVAL);
42331 +
42332 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42333 + return ERR_PTR(-ENOMEM);
42334 +
42335 + if (copy_from_user(tmp, s_tmp->filename, len))
42336 + return ERR_PTR(-EFAULT);
42337 + tmp[len-1] = '\0';
42338 + s_tmp->filename = tmp;
42339 +
42340 + if (!strcmp(s_tmp->filename, "/"))
42341 + role->root_label = s_tmp;
42342 +
42343 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42344 + return ERR_PTR(-EFAULT);
42345 +
42346 + /* copy user and group transition tables */
42347 +
42348 + if (s_tmp->user_trans_num) {
42349 + uid_t *uidlist;
42350 +
42351 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42352 + if (uidlist == NULL)
42353 + return ERR_PTR(-ENOMEM);
42354 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42355 + return ERR_PTR(-EFAULT);
42356 +
42357 + s_tmp->user_transitions = uidlist;
42358 + }
42359 +
42360 + if (s_tmp->group_trans_num) {
42361 + gid_t *gidlist;
42362 +
42363 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42364 + if (gidlist == NULL)
42365 + return ERR_PTR(-ENOMEM);
42366 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42367 + return ERR_PTR(-EFAULT);
42368 +
42369 + s_tmp->group_transitions = gidlist;
42370 + }
42371 +
42372 + /* set up object hash table */
42373 + num_objs = count_user_objs(ghash.first);
42374 +
42375 + s_tmp->obj_hash_size = num_objs;
42376 + s_tmp->obj_hash =
42377 + (struct acl_object_label **)
42378 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42379 +
42380 + if (!s_tmp->obj_hash)
42381 + return ERR_PTR(-ENOMEM);
42382 +
42383 + memset(s_tmp->obj_hash, 0,
42384 + s_tmp->obj_hash_size *
42385 + sizeof (struct acl_object_label *));
42386 +
42387 + /* add in objects */
42388 + err = copy_user_objs(ghash.first, s_tmp, role);
42389 +
42390 + if (err)
42391 + return ERR_PTR(err);
42392 +
42393 + /* set pointer for parent subject */
42394 + if (s_tmp->parent_subject) {
42395 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42396 +
42397 + if (IS_ERR(s_tmp2))
42398 + return s_tmp2;
42399 +
42400 + s_tmp->parent_subject = s_tmp2;
42401 + }
42402 +
42403 + /* add in ip acls */
42404 +
42405 + if (!s_tmp->ip_num) {
42406 + s_tmp->ips = NULL;
42407 + goto insert;
42408 + }
42409 +
42410 + i_tmp =
42411 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42412 + sizeof (struct acl_ip_label *));
42413 +
42414 + if (!i_tmp)
42415 + return ERR_PTR(-ENOMEM);
42416 +
42417 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42418 + *(i_tmp + i_num) =
42419 + (struct acl_ip_label *)
42420 + acl_alloc(sizeof (struct acl_ip_label));
42421 + if (!*(i_tmp + i_num))
42422 + return ERR_PTR(-ENOMEM);
42423 +
42424 + if (copy_from_user
42425 + (&i_utmp2, s_tmp->ips + i_num,
42426 + sizeof (struct acl_ip_label *)))
42427 + return ERR_PTR(-EFAULT);
42428 +
42429 + if (copy_from_user
42430 + (*(i_tmp + i_num), i_utmp2,
42431 + sizeof (struct acl_ip_label)))
42432 + return ERR_PTR(-EFAULT);
42433 +
42434 + if ((*(i_tmp + i_num))->iface == NULL)
42435 + continue;
42436 +
42437 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42438 + if (!len || len >= IFNAMSIZ)
42439 + return ERR_PTR(-EINVAL);
42440 + tmp = acl_alloc(len);
42441 + if (tmp == NULL)
42442 + return ERR_PTR(-ENOMEM);
42443 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42444 + return ERR_PTR(-EFAULT);
42445 + (*(i_tmp + i_num))->iface = tmp;
42446 + }
42447 +
42448 + s_tmp->ips = i_tmp;
42449 +
42450 +insert:
42451 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42452 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42453 + return ERR_PTR(-ENOMEM);
42454 +
42455 + return s_tmp;
42456 +}
42457 +
42458 +static int
42459 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42460 +{
42461 + struct acl_subject_label s_pre;
42462 + struct acl_subject_label * ret;
42463 + int err;
42464 +
42465 + while (userp) {
42466 + if (copy_from_user(&s_pre, userp,
42467 + sizeof (struct acl_subject_label)))
42468 + return -EFAULT;
42469 +
42470 + /* do not add nested subjects here, add
42471 + while parsing objects
42472 + */
42473 +
42474 + if (s_pre.mode & GR_NESTED) {
42475 + userp = s_pre.prev;
42476 + continue;
42477 + }
42478 +
42479 + ret = do_copy_user_subj(userp, role);
42480 +
42481 + err = PTR_ERR(ret);
42482 + if (IS_ERR(ret))
42483 + return err;
42484 +
42485 + insert_acl_subj_label(ret, role);
42486 +
42487 + userp = s_pre.prev;
42488 + }
42489 +
42490 + return 0;
42491 +}
42492 +
42493 +static int
42494 +copy_user_acl(struct gr_arg *arg)
42495 +{
42496 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42497 + struct sprole_pw *sptmp;
42498 + struct gr_hash_struct *ghash;
42499 + uid_t *domainlist;
42500 + unsigned int r_num;
42501 + unsigned int len;
42502 + char *tmp;
42503 + int err = 0;
42504 + __u16 i;
42505 + __u32 num_subjs;
42506 +
42507 + /* we need a default and kernel role */
42508 + if (arg->role_db.num_roles < 2)
42509 + return -EINVAL;
42510 +
42511 + /* copy special role authentication info from userspace */
42512 +
42513 + num_sprole_pws = arg->num_sprole_pws;
42514 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42515 +
42516 + if (!acl_special_roles) {
42517 + err = -ENOMEM;
42518 + goto cleanup;
42519 + }
42520 +
42521 + for (i = 0; i < num_sprole_pws; i++) {
42522 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42523 + if (!sptmp) {
42524 + err = -ENOMEM;
42525 + goto cleanup;
42526 + }
42527 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42528 + sizeof (struct sprole_pw))) {
42529 + err = -EFAULT;
42530 + goto cleanup;
42531 + }
42532 +
42533 + len =
42534 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42535 +
42536 + if (!len || len >= GR_SPROLE_LEN) {
42537 + err = -EINVAL;
42538 + goto cleanup;
42539 + }
42540 +
42541 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42542 + err = -ENOMEM;
42543 + goto cleanup;
42544 + }
42545 +
42546 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42547 + err = -EFAULT;
42548 + goto cleanup;
42549 + }
42550 + tmp[len-1] = '\0';
42551 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42552 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42553 +#endif
42554 + sptmp->rolename = tmp;
42555 + acl_special_roles[i] = sptmp;
42556 + }
42557 +
42558 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42559 +
42560 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42561 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42562 +
42563 + if (!r_tmp) {
42564 + err = -ENOMEM;
42565 + goto cleanup;
42566 + }
42567 +
42568 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42569 + sizeof (struct acl_role_label *))) {
42570 + err = -EFAULT;
42571 + goto cleanup;
42572 + }
42573 +
42574 + if (copy_from_user(r_tmp, r_utmp2,
42575 + sizeof (struct acl_role_label))) {
42576 + err = -EFAULT;
42577 + goto cleanup;
42578 + }
42579 +
42580 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42581 +
42582 + if (!len || len >= PATH_MAX) {
42583 + err = -EINVAL;
42584 + goto cleanup;
42585 + }
42586 +
42587 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42588 + err = -ENOMEM;
42589 + goto cleanup;
42590 + }
42591 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42592 + err = -EFAULT;
42593 + goto cleanup;
42594 + }
42595 + tmp[len-1] = '\0';
42596 + r_tmp->rolename = tmp;
42597 +
42598 + if (!strcmp(r_tmp->rolename, "default")
42599 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42600 + default_role = r_tmp;
42601 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42602 + kernel_role = r_tmp;
42603 + }
42604 +
42605 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42606 + err = -ENOMEM;
42607 + goto cleanup;
42608 + }
42609 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42610 + err = -EFAULT;
42611 + goto cleanup;
42612 + }
42613 +
42614 + r_tmp->hash = ghash;
42615 +
42616 + num_subjs = count_user_subjs(r_tmp->hash->first);
42617 +
42618 + r_tmp->subj_hash_size = num_subjs;
42619 + r_tmp->subj_hash =
42620 + (struct acl_subject_label **)
42621 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42622 +
42623 + if (!r_tmp->subj_hash) {
42624 + err = -ENOMEM;
42625 + goto cleanup;
42626 + }
42627 +
42628 + err = copy_user_allowedips(r_tmp);
42629 + if (err)
42630 + goto cleanup;
42631 +
42632 + /* copy domain info */
42633 + if (r_tmp->domain_children != NULL) {
42634 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42635 + if (domainlist == NULL) {
42636 + err = -ENOMEM;
42637 + goto cleanup;
42638 + }
42639 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42640 + err = -EFAULT;
42641 + goto cleanup;
42642 + }
42643 + r_tmp->domain_children = domainlist;
42644 + }
42645 +
42646 + err = copy_user_transitions(r_tmp);
42647 + if (err)
42648 + goto cleanup;
42649 +
42650 + memset(r_tmp->subj_hash, 0,
42651 + r_tmp->subj_hash_size *
42652 + sizeof (struct acl_subject_label *));
42653 +
42654 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42655 +
42656 + if (err)
42657 + goto cleanup;
42658 +
42659 + /* set nested subject list to null */
42660 + r_tmp->hash->first = NULL;
42661 +
42662 + insert_acl_role_label(r_tmp);
42663 + }
42664 +
42665 + goto return_err;
42666 + cleanup:
42667 + free_variables();
42668 + return_err:
42669 + return err;
42670 +
42671 +}
42672 +
42673 +static int
42674 +gracl_init(struct gr_arg *args)
42675 +{
42676 + int error = 0;
42677 +
42678 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42679 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42680 +
42681 + if (init_variables(args)) {
42682 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42683 + error = -ENOMEM;
42684 + free_variables();
42685 + goto out;
42686 + }
42687 +
42688 + error = copy_user_acl(args);
42689 + free_init_variables();
42690 + if (error) {
42691 + free_variables();
42692 + goto out;
42693 + }
42694 +
42695 + if ((error = gr_set_acls(0))) {
42696 + free_variables();
42697 + goto out;
42698 + }
42699 +
42700 + pax_open_kernel();
42701 + gr_status |= GR_READY;
42702 + pax_close_kernel();
42703 +
42704 + out:
42705 + return error;
42706 +}
42707 +
42708 +/* derived from glibc fnmatch() 0: match, 1: no match*/
42709 +
42710 +static int
42711 +glob_match(const char *p, const char *n)
42712 +{
42713 + char c;
42714 +
42715 + while ((c = *p++) != '\0') {
42716 + switch (c) {
42717 + case '?':
42718 + if (*n == '\0')
42719 + return 1;
42720 + else if (*n == '/')
42721 + return 1;
42722 + break;
42723 + case '\\':
42724 + if (*n != c)
42725 + return 1;
42726 + break;
42727 + case '*':
42728 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
42729 + if (*n == '/')
42730 + return 1;
42731 + else if (c == '?') {
42732 + if (*n == '\0')
42733 + return 1;
42734 + else
42735 + ++n;
42736 + }
42737 + }
42738 + if (c == '\0') {
42739 + return 0;
42740 + } else {
42741 + const char *endp;
42742 +
42743 + if ((endp = strchr(n, '/')) == NULL)
42744 + endp = n + strlen(n);
42745 +
42746 + if (c == '[') {
42747 + for (--p; n < endp; ++n)
42748 + if (!glob_match(p, n))
42749 + return 0;
42750 + } else if (c == '/') {
42751 + while (*n != '\0' && *n != '/')
42752 + ++n;
42753 + if (*n == '/' && !glob_match(p, n + 1))
42754 + return 0;
42755 + } else {
42756 + for (--p; n < endp; ++n)
42757 + if (*n == c && !glob_match(p, n))
42758 + return 0;
42759 + }
42760 +
42761 + return 1;
42762 + }
42763 + case '[':
42764 + {
42765 + int not;
42766 + char cold;
42767 +
42768 + if (*n == '\0' || *n == '/')
42769 + return 1;
42770 +
42771 + not = (*p == '!' || *p == '^');
42772 + if (not)
42773 + ++p;
42774 +
42775 + c = *p++;
42776 + for (;;) {
42777 + unsigned char fn = (unsigned char)*n;
42778 +
42779 + if (c == '\0')
42780 + return 1;
42781 + else {
42782 + if (c == fn)
42783 + goto matched;
42784 + cold = c;
42785 + c = *p++;
42786 +
42787 + if (c == '-' && *p != ']') {
42788 + unsigned char cend = *p++;
42789 +
42790 + if (cend == '\0')
42791 + return 1;
42792 +
42793 + if (cold <= fn && fn <= cend)
42794 + goto matched;
42795 +
42796 + c = *p++;
42797 + }
42798 + }
42799 +
42800 + if (c == ']')
42801 + break;
42802 + }
42803 + if (!not)
42804 + return 1;
42805 + break;
42806 + matched:
42807 + while (c != ']') {
42808 + if (c == '\0')
42809 + return 1;
42810 +
42811 + c = *p++;
42812 + }
42813 + if (not)
42814 + return 1;
42815 + }
42816 + break;
42817 + default:
42818 + if (c != *n)
42819 + return 1;
42820 + }
42821 +
42822 + ++n;
42823 + }
42824 +
42825 + if (*n == '\0')
42826 + return 0;
42827 +
42828 + if (*n == '/')
42829 + return 0;
42830 +
42831 + return 1;
42832 +}
42833 +
42834 +static struct acl_object_label *
42835 +chk_glob_label(struct acl_object_label *globbed,
42836 + struct dentry *dentry, struct vfsmount *mnt, char **path)
42837 +{
42838 + struct acl_object_label *tmp;
42839 +
42840 + if (*path == NULL)
42841 + *path = gr_to_filename_nolock(dentry, mnt);
42842 +
42843 + tmp = globbed;
42844 +
42845 + while (tmp) {
42846 + if (!glob_match(tmp->filename, *path))
42847 + return tmp;
42848 + tmp = tmp->next;
42849 + }
42850 +
42851 + return NULL;
42852 +}
42853 +
42854 +static struct acl_object_label *
42855 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
42856 + const ino_t curr_ino, const dev_t curr_dev,
42857 + const struct acl_subject_label *subj, char **path, const int checkglob)
42858 +{
42859 + struct acl_subject_label *tmpsubj;
42860 + struct acl_object_label *retval;
42861 + struct acl_object_label *retval2;
42862 +
42863 + tmpsubj = (struct acl_subject_label *) subj;
42864 + read_lock(&gr_inode_lock);
42865 + do {
42866 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
42867 + if (retval) {
42868 + if (checkglob && retval->globbed) {
42869 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
42870 + (struct vfsmount *)orig_mnt, path);
42871 + if (retval2)
42872 + retval = retval2;
42873 + }
42874 + break;
42875 + }
42876 + } while ((tmpsubj = tmpsubj->parent_subject));
42877 + read_unlock(&gr_inode_lock);
42878 +
42879 + return retval;
42880 +}
42881 +
42882 +static __inline__ struct acl_object_label *
42883 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
42884 + struct dentry *curr_dentry,
42885 + const struct acl_subject_label *subj, char **path, const int checkglob)
42886 +{
42887 + int newglob = checkglob;
42888 + ino_t inode;
42889 + dev_t device;
42890 +
42891 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
42892 + as we don't want a / * rule to match instead of the / object
42893 + don't do this for create lookups that call this function though, since they're looking up
42894 + on the parent and thus need globbing checks on all paths
42895 + */
42896 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
42897 + newglob = GR_NO_GLOB;
42898 +
42899 + spin_lock(&curr_dentry->d_lock);
42900 + inode = curr_dentry->d_inode->i_ino;
42901 + device = __get_dev(curr_dentry);
42902 + spin_unlock(&curr_dentry->d_lock);
42903 +
42904 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
42905 +}
42906 +
42907 +static struct acl_object_label *
42908 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
42909 + const struct acl_subject_label *subj, char *path, const int checkglob)
42910 +{
42911 + struct dentry *dentry = (struct dentry *) l_dentry;
42912 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
42913 + struct acl_object_label *retval;
42914 + struct dentry *parent;
42915 +
42916 + write_seqlock(&rename_lock);
42917 + br_read_lock(vfsmount_lock);
42918 +
42919 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
42920 +#ifdef CONFIG_NET
42921 + mnt == sock_mnt ||
42922 +#endif
42923 +#ifdef CONFIG_HUGETLBFS
42924 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
42925 +#endif
42926 + /* ignore Eric Biederman */
42927 + IS_PRIVATE(l_dentry->d_inode))) {
42928 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
42929 + goto out;
42930 + }
42931 +
42932 + for (;;) {
42933 + if (dentry == real_root.dentry && mnt == real_root.mnt)
42934 + break;
42935 +
42936 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
42937 + if (mnt->mnt_parent == mnt)
42938 + break;
42939 +
42940 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
42941 + if (retval != NULL)
42942 + goto out;
42943 +
42944 + dentry = mnt->mnt_mountpoint;
42945 + mnt = mnt->mnt_parent;
42946 + continue;
42947 + }
42948 +
42949 + parent = dentry->d_parent;
42950 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
42951 + if (retval != NULL)
42952 + goto out;
42953 +
42954 + dentry = parent;
42955 + }
42956 +
42957 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
42958 +
42959 + /* real_root is pinned so we don't have to hold a reference */
42960 + if (retval == NULL)
42961 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
42962 +out:
42963 + br_read_unlock(vfsmount_lock);
42964 + write_sequnlock(&rename_lock);
42965 +
42966 + BUG_ON(retval == NULL);
42967 +
42968 + return retval;
42969 +}
42970 +
42971 +static __inline__ struct acl_object_label *
42972 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
42973 + const struct acl_subject_label *subj)
42974 +{
42975 + char *path = NULL;
42976 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
42977 +}
42978 +
42979 +static __inline__ struct acl_object_label *
42980 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
42981 + const struct acl_subject_label *subj)
42982 +{
42983 + char *path = NULL;
42984 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
42985 +}
42986 +
42987 +static __inline__ struct acl_object_label *
42988 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
42989 + const struct acl_subject_label *subj, char *path)
42990 +{
42991 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
42992 +}
42993 +
42994 +static struct acl_subject_label *
42995 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
42996 + const struct acl_role_label *role)
42997 +{
42998 + struct dentry *dentry = (struct dentry *) l_dentry;
42999 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43000 + struct acl_subject_label *retval;
43001 + struct dentry *parent;
43002 +
43003 + write_seqlock(&rename_lock);
43004 + br_read_lock(vfsmount_lock);
43005 +
43006 + for (;;) {
43007 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43008 + break;
43009 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43010 + if (mnt->mnt_parent == mnt)
43011 + break;
43012 +
43013 + spin_lock(&dentry->d_lock);
43014 + read_lock(&gr_inode_lock);
43015 + retval =
43016 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43017 + __get_dev(dentry), role);
43018 + read_unlock(&gr_inode_lock);
43019 + spin_unlock(&dentry->d_lock);
43020 + if (retval != NULL)
43021 + goto out;
43022 +
43023 + dentry = mnt->mnt_mountpoint;
43024 + mnt = mnt->mnt_parent;
43025 + continue;
43026 + }
43027 +
43028 + spin_lock(&dentry->d_lock);
43029 + read_lock(&gr_inode_lock);
43030 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43031 + __get_dev(dentry), role);
43032 + read_unlock(&gr_inode_lock);
43033 + parent = dentry->d_parent;
43034 + spin_unlock(&dentry->d_lock);
43035 +
43036 + if (retval != NULL)
43037 + goto out;
43038 +
43039 + dentry = parent;
43040 + }
43041 +
43042 + spin_lock(&dentry->d_lock);
43043 + read_lock(&gr_inode_lock);
43044 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43045 + __get_dev(dentry), role);
43046 + read_unlock(&gr_inode_lock);
43047 + spin_unlock(&dentry->d_lock);
43048 +
43049 + if (unlikely(retval == NULL)) {
43050 + /* real_root is pinned, we don't need to hold a reference */
43051 + read_lock(&gr_inode_lock);
43052 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43053 + __get_dev(real_root.dentry), role);
43054 + read_unlock(&gr_inode_lock);
43055 + }
43056 +out:
43057 + br_read_unlock(vfsmount_lock);
43058 + write_sequnlock(&rename_lock);
43059 +
43060 + BUG_ON(retval == NULL);
43061 +
43062 + return retval;
43063 +}
43064 +
43065 +static void
43066 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43067 +{
43068 + struct task_struct *task = current;
43069 + const struct cred *cred = current_cred();
43070 +
43071 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43072 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43073 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43074 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43075 +
43076 + return;
43077 +}
43078 +
43079 +static void
43080 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43081 +{
43082 + struct task_struct *task = current;
43083 + const struct cred *cred = current_cred();
43084 +
43085 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43086 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43087 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43088 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43089 +
43090 + return;
43091 +}
43092 +
43093 +static void
43094 +gr_log_learn_id_change(const char type, const unsigned int real,
43095 + const unsigned int effective, const unsigned int fs)
43096 +{
43097 + struct task_struct *task = current;
43098 + const struct cred *cred = current_cred();
43099 +
43100 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43101 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43102 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43103 + type, real, effective, fs, &task->signal->saved_ip);
43104 +
43105 + return;
43106 +}
43107 +
43108 +__u32
43109 +gr_check_link(const struct dentry * new_dentry,
43110 + const struct dentry * parent_dentry,
43111 + const struct vfsmount * parent_mnt,
43112 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43113 +{
43114 + struct acl_object_label *obj;
43115 + __u32 oldmode, newmode;
43116 + __u32 needmode;
43117 +
43118 + if (unlikely(!(gr_status & GR_READY)))
43119 + return (GR_CREATE | GR_LINK);
43120 +
43121 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43122 + oldmode = obj->mode;
43123 +
43124 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43125 + oldmode |= (GR_CREATE | GR_LINK);
43126 +
43127 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43128 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43129 + needmode |= GR_SETID | GR_AUDIT_SETID;
43130 +
43131 + newmode =
43132 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43133 + oldmode | needmode);
43134 +
43135 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43136 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43137 + GR_INHERIT | GR_AUDIT_INHERIT);
43138 +
43139 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43140 + goto bad;
43141 +
43142 + if ((oldmode & needmode) != needmode)
43143 + goto bad;
43144 +
43145 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43146 + if ((newmode & needmode) != needmode)
43147 + goto bad;
43148 +
43149 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43150 + return newmode;
43151 +bad:
43152 + needmode = oldmode;
43153 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43154 + needmode |= GR_SETID;
43155 +
43156 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43157 + gr_log_learn(old_dentry, old_mnt, needmode);
43158 + return (GR_CREATE | GR_LINK);
43159 + } else if (newmode & GR_SUPPRESS)
43160 + return GR_SUPPRESS;
43161 + else
43162 + return 0;
43163 +}
43164 +
43165 +__u32
43166 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43167 + const struct vfsmount * mnt)
43168 +{
43169 + __u32 retval = mode;
43170 + struct acl_subject_label *curracl;
43171 + struct acl_object_label *currobj;
43172 +
43173 + if (unlikely(!(gr_status & GR_READY)))
43174 + return (mode & ~GR_AUDITS);
43175 +
43176 + curracl = current->acl;
43177 +
43178 + currobj = chk_obj_label(dentry, mnt, curracl);
43179 + retval = currobj->mode & mode;
43180 +
43181 + /* if we're opening a specified transfer file for writing
43182 + (e.g. /dev/initctl), then transfer our role to init
43183 + */
43184 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43185 + current->role->roletype & GR_ROLE_PERSIST)) {
43186 + struct task_struct *task = init_pid_ns.child_reaper;
43187 +
43188 + if (task->role != current->role) {
43189 + task->acl_sp_role = 0;
43190 + task->acl_role_id = current->acl_role_id;
43191 + task->role = current->role;
43192 + rcu_read_lock();
43193 + read_lock(&grsec_exec_file_lock);
43194 + gr_apply_subject_to_task(task);
43195 + read_unlock(&grsec_exec_file_lock);
43196 + rcu_read_unlock();
43197 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43198 + }
43199 + }
43200 +
43201 + if (unlikely
43202 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43203 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43204 + __u32 new_mode = mode;
43205 +
43206 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43207 +
43208 + retval = new_mode;
43209 +
43210 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43211 + new_mode |= GR_INHERIT;
43212 +
43213 + if (!(mode & GR_NOLEARN))
43214 + gr_log_learn(dentry, mnt, new_mode);
43215 + }
43216 +
43217 + return retval;
43218 +}
43219 +
43220 +__u32
43221 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43222 + const struct vfsmount * mnt, const __u32 mode)
43223 +{
43224 + struct name_entry *match;
43225 + struct acl_object_label *matchpo;
43226 + struct acl_subject_label *curracl;
43227 + char *path;
43228 + __u32 retval;
43229 +
43230 + if (unlikely(!(gr_status & GR_READY)))
43231 + return (mode & ~GR_AUDITS);
43232 +
43233 + preempt_disable();
43234 + path = gr_to_filename_rbac(new_dentry, mnt);
43235 + match = lookup_name_entry_create(path);
43236 +
43237 + if (!match)
43238 + goto check_parent;
43239 +
43240 + curracl = current->acl;
43241 +
43242 + read_lock(&gr_inode_lock);
43243 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43244 + read_unlock(&gr_inode_lock);
43245 +
43246 + if (matchpo) {
43247 + if ((matchpo->mode & mode) !=
43248 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43249 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43250 + __u32 new_mode = mode;
43251 +
43252 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43253 +
43254 + gr_log_learn(new_dentry, mnt, new_mode);
43255 +
43256 + preempt_enable();
43257 + return new_mode;
43258 + }
43259 + preempt_enable();
43260 + return (matchpo->mode & mode);
43261 + }
43262 +
43263 + check_parent:
43264 + curracl = current->acl;
43265 +
43266 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43267 + retval = matchpo->mode & mode;
43268 +
43269 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43270 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43271 + __u32 new_mode = mode;
43272 +
43273 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43274 +
43275 + gr_log_learn(new_dentry, mnt, new_mode);
43276 + preempt_enable();
43277 + return new_mode;
43278 + }
43279 +
43280 + preempt_enable();
43281 + return retval;
43282 +}
43283 +
43284 +int
43285 +gr_check_hidden_task(const struct task_struct *task)
43286 +{
43287 + if (unlikely(!(gr_status & GR_READY)))
43288 + return 0;
43289 +
43290 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43291 + return 1;
43292 +
43293 + return 0;
43294 +}
43295 +
43296 +int
43297 +gr_check_protected_task(const struct task_struct *task)
43298 +{
43299 + if (unlikely(!(gr_status & GR_READY) || !task))
43300 + return 0;
43301 +
43302 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43303 + task->acl != current->acl)
43304 + return 1;
43305 +
43306 + return 0;
43307 +}
43308 +
43309 +int
43310 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43311 +{
43312 + struct task_struct *p;
43313 + int ret = 0;
43314 +
43315 + if (unlikely(!(gr_status & GR_READY) || !pid))
43316 + return ret;
43317 +
43318 + read_lock(&tasklist_lock);
43319 + do_each_pid_task(pid, type, p) {
43320 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43321 + p->acl != current->acl) {
43322 + ret = 1;
43323 + goto out;
43324 + }
43325 + } while_each_pid_task(pid, type, p);
43326 +out:
43327 + read_unlock(&tasklist_lock);
43328 +
43329 + return ret;
43330 +}
43331 +
43332 +void
43333 +gr_copy_label(struct task_struct *tsk)
43334 +{
43335 + tsk->signal->used_accept = 0;
43336 + tsk->acl_sp_role = 0;
43337 + tsk->acl_role_id = current->acl_role_id;
43338 + tsk->acl = current->acl;
43339 + tsk->role = current->role;
43340 + tsk->signal->curr_ip = current->signal->curr_ip;
43341 + tsk->signal->saved_ip = current->signal->saved_ip;
43342 + if (current->exec_file)
43343 + get_file(current->exec_file);
43344 + tsk->exec_file = current->exec_file;
43345 + tsk->is_writable = current->is_writable;
43346 + if (unlikely(current->signal->used_accept)) {
43347 + current->signal->curr_ip = 0;
43348 + current->signal->saved_ip = 0;
43349 + }
43350 +
43351 + return;
43352 +}
43353 +
43354 +static void
43355 +gr_set_proc_res(struct task_struct *task)
43356 +{
43357 + struct acl_subject_label *proc;
43358 + unsigned short i;
43359 +
43360 + proc = task->acl;
43361 +
43362 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43363 + return;
43364 +
43365 + for (i = 0; i < RLIM_NLIMITS; i++) {
43366 + if (!(proc->resmask & (1 << i)))
43367 + continue;
43368 +
43369 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43370 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43371 + }
43372 +
43373 + return;
43374 +}
43375 +
43376 +extern int __gr_process_user_ban(struct user_struct *user);
43377 +
43378 +int
43379 +gr_check_user_change(int real, int effective, int fs)
43380 +{
43381 + unsigned int i;
43382 + __u16 num;
43383 + uid_t *uidlist;
43384 + int curuid;
43385 + int realok = 0;
43386 + int effectiveok = 0;
43387 + int fsok = 0;
43388 +
43389 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43390 + struct user_struct *user;
43391 +
43392 + if (real == -1)
43393 + goto skipit;
43394 +
43395 + user = find_user(real);
43396 + if (user == NULL)
43397 + goto skipit;
43398 +
43399 + if (__gr_process_user_ban(user)) {
43400 + /* for find_user */
43401 + free_uid(user);
43402 + return 1;
43403 + }
43404 +
43405 + /* for find_user */
43406 + free_uid(user);
43407 +
43408 +skipit:
43409 +#endif
43410 +
43411 + if (unlikely(!(gr_status & GR_READY)))
43412 + return 0;
43413 +
43414 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43415 + gr_log_learn_id_change('u', real, effective, fs);
43416 +
43417 + num = current->acl->user_trans_num;
43418 + uidlist = current->acl->user_transitions;
43419 +
43420 + if (uidlist == NULL)
43421 + return 0;
43422 +
43423 + if (real == -1)
43424 + realok = 1;
43425 + if (effective == -1)
43426 + effectiveok = 1;
43427 + if (fs == -1)
43428 + fsok = 1;
43429 +
43430 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43431 + for (i = 0; i < num; i++) {
43432 + curuid = (int)uidlist[i];
43433 + if (real == curuid)
43434 + realok = 1;
43435 + if (effective == curuid)
43436 + effectiveok = 1;
43437 + if (fs == curuid)
43438 + fsok = 1;
43439 + }
43440 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43441 + for (i = 0; i < num; i++) {
43442 + curuid = (int)uidlist[i];
43443 + if (real == curuid)
43444 + break;
43445 + if (effective == curuid)
43446 + break;
43447 + if (fs == curuid)
43448 + break;
43449 + }
43450 + /* not in deny list */
43451 + if (i == num) {
43452 + realok = 1;
43453 + effectiveok = 1;
43454 + fsok = 1;
43455 + }
43456 + }
43457 +
43458 + if (realok && effectiveok && fsok)
43459 + return 0;
43460 + else {
43461 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43462 + return 1;
43463 + }
43464 +}
43465 +
43466 +int
43467 +gr_check_group_change(int real, int effective, int fs)
43468 +{
43469 + unsigned int i;
43470 + __u16 num;
43471 + gid_t *gidlist;
43472 + int curgid;
43473 + int realok = 0;
43474 + int effectiveok = 0;
43475 + int fsok = 0;
43476 +
43477 + if (unlikely(!(gr_status & GR_READY)))
43478 + return 0;
43479 +
43480 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43481 + gr_log_learn_id_change('g', real, effective, fs);
43482 +
43483 + num = current->acl->group_trans_num;
43484 + gidlist = current->acl->group_transitions;
43485 +
43486 + if (gidlist == NULL)
43487 + return 0;
43488 +
43489 + if (real == -1)
43490 + realok = 1;
43491 + if (effective == -1)
43492 + effectiveok = 1;
43493 + if (fs == -1)
43494 + fsok = 1;
43495 +
43496 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43497 + for (i = 0; i < num; i++) {
43498 + curgid = (int)gidlist[i];
43499 + if (real == curgid)
43500 + realok = 1;
43501 + if (effective == curgid)
43502 + effectiveok = 1;
43503 + if (fs == curgid)
43504 + fsok = 1;
43505 + }
43506 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43507 + for (i = 0; i < num; i++) {
43508 + curgid = (int)gidlist[i];
43509 + if (real == curgid)
43510 + break;
43511 + if (effective == curgid)
43512 + break;
43513 + if (fs == curgid)
43514 + break;
43515 + }
43516 + /* not in deny list */
43517 + if (i == num) {
43518 + realok = 1;
43519 + effectiveok = 1;
43520 + fsok = 1;
43521 + }
43522 + }
43523 +
43524 + if (realok && effectiveok && fsok)
43525 + return 0;
43526 + else {
43527 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43528 + return 1;
43529 + }
43530 +}
43531 +
43532 +void
43533 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43534 +{
43535 + struct acl_role_label *role = task->role;
43536 + struct acl_subject_label *subj = NULL;
43537 + struct acl_object_label *obj;
43538 + struct file *filp;
43539 +
43540 + if (unlikely(!(gr_status & GR_READY)))
43541 + return;
43542 +
43543 + filp = task->exec_file;
43544 +
43545 + /* kernel process, we'll give them the kernel role */
43546 + if (unlikely(!filp)) {
43547 + task->role = kernel_role;
43548 + task->acl = kernel_role->root_label;
43549 + return;
43550 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43551 + role = lookup_acl_role_label(task, uid, gid);
43552 +
43553 + /* perform subject lookup in possibly new role
43554 + we can use this result below in the case where role == task->role
43555 + */
43556 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43557 +
43558 + /* if we changed uid/gid, but result in the same role
43559 + and are using inheritance, don't lose the inherited subject
43560 + if current subject is other than what normal lookup
43561 + would result in, we arrived via inheritance, don't
43562 + lose subject
43563 + */
43564 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43565 + (subj == task->acl)))
43566 + task->acl = subj;
43567 +
43568 + task->role = role;
43569 +
43570 + task->is_writable = 0;
43571 +
43572 + /* ignore additional mmap checks for processes that are writable
43573 + by the default ACL */
43574 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43575 + if (unlikely(obj->mode & GR_WRITE))
43576 + task->is_writable = 1;
43577 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43578 + if (unlikely(obj->mode & GR_WRITE))
43579 + task->is_writable = 1;
43580 +
43581 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43582 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43583 +#endif
43584 +
43585 + gr_set_proc_res(task);
43586 +
43587 + return;
43588 +}
43589 +
43590 +int
43591 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43592 + const int unsafe_share)
43593 +{
43594 + struct task_struct *task = current;
43595 + struct acl_subject_label *newacl;
43596 + struct acl_object_label *obj;
43597 + __u32 retmode;
43598 +
43599 + if (unlikely(!(gr_status & GR_READY)))
43600 + return 0;
43601 +
43602 + newacl = chk_subj_label(dentry, mnt, task->role);
43603 +
43604 + task_lock(task);
43605 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43606 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43607 + !(task->role->roletype & GR_ROLE_GOD) &&
43608 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43609 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43610 + task_unlock(task);
43611 + if (unsafe_share)
43612 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43613 + else
43614 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43615 + return -EACCES;
43616 + }
43617 + task_unlock(task);
43618 +
43619 + obj = chk_obj_label(dentry, mnt, task->acl);
43620 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43621 +
43622 + if (!(task->acl->mode & GR_INHERITLEARN) &&
43623 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43624 + if (obj->nested)
43625 + task->acl = obj->nested;
43626 + else
43627 + task->acl = newacl;
43628 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43629 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43630 +
43631 + task->is_writable = 0;
43632 +
43633 + /* ignore additional mmap checks for processes that are writable
43634 + by the default ACL */
43635 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
43636 + if (unlikely(obj->mode & GR_WRITE))
43637 + task->is_writable = 1;
43638 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
43639 + if (unlikely(obj->mode & GR_WRITE))
43640 + task->is_writable = 1;
43641 +
43642 + gr_set_proc_res(task);
43643 +
43644 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43645 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43646 +#endif
43647 + return 0;
43648 +}
43649 +
43650 +/* always called with valid inodev ptr */
43651 +static void
43652 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43653 +{
43654 + struct acl_object_label *matchpo;
43655 + struct acl_subject_label *matchps;
43656 + struct acl_subject_label *subj;
43657 + struct acl_role_label *role;
43658 + unsigned int x;
43659 +
43660 + FOR_EACH_ROLE_START(role)
43661 + FOR_EACH_SUBJECT_START(role, subj, x)
43662 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43663 + matchpo->mode |= GR_DELETED;
43664 + FOR_EACH_SUBJECT_END(subj,x)
43665 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
43666 + if (subj->inode == ino && subj->device == dev)
43667 + subj->mode |= GR_DELETED;
43668 + FOR_EACH_NESTED_SUBJECT_END(subj)
43669 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43670 + matchps->mode |= GR_DELETED;
43671 + FOR_EACH_ROLE_END(role)
43672 +
43673 + inodev->nentry->deleted = 1;
43674 +
43675 + return;
43676 +}
43677 +
43678 +void
43679 +gr_handle_delete(const ino_t ino, const dev_t dev)
43680 +{
43681 + struct inodev_entry *inodev;
43682 +
43683 + if (unlikely(!(gr_status & GR_READY)))
43684 + return;
43685 +
43686 + write_lock(&gr_inode_lock);
43687 + inodev = lookup_inodev_entry(ino, dev);
43688 + if (inodev != NULL)
43689 + do_handle_delete(inodev, ino, dev);
43690 + write_unlock(&gr_inode_lock);
43691 +
43692 + return;
43693 +}
43694 +
43695 +static void
43696 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
43697 + const ino_t newinode, const dev_t newdevice,
43698 + struct acl_subject_label *subj)
43699 +{
43700 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
43701 + struct acl_object_label *match;
43702 +
43703 + match = subj->obj_hash[index];
43704 +
43705 + while (match && (match->inode != oldinode ||
43706 + match->device != olddevice ||
43707 + !(match->mode & GR_DELETED)))
43708 + match = match->next;
43709 +
43710 + if (match && (match->inode == oldinode)
43711 + && (match->device == olddevice)
43712 + && (match->mode & GR_DELETED)) {
43713 + if (match->prev == NULL) {
43714 + subj->obj_hash[index] = match->next;
43715 + if (match->next != NULL)
43716 + match->next->prev = NULL;
43717 + } else {
43718 + match->prev->next = match->next;
43719 + if (match->next != NULL)
43720 + match->next->prev = match->prev;
43721 + }
43722 + match->prev = NULL;
43723 + match->next = NULL;
43724 + match->inode = newinode;
43725 + match->device = newdevice;
43726 + match->mode &= ~GR_DELETED;
43727 +
43728 + insert_acl_obj_label(match, subj);
43729 + }
43730 +
43731 + return;
43732 +}
43733 +
43734 +static void
43735 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
43736 + const ino_t newinode, const dev_t newdevice,
43737 + struct acl_role_label *role)
43738 +{
43739 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
43740 + struct acl_subject_label *match;
43741 +
43742 + match = role->subj_hash[index];
43743 +
43744 + while (match && (match->inode != oldinode ||
43745 + match->device != olddevice ||
43746 + !(match->mode & GR_DELETED)))
43747 + match = match->next;
43748 +
43749 + if (match && (match->inode == oldinode)
43750 + && (match->device == olddevice)
43751 + && (match->mode & GR_DELETED)) {
43752 + if (match->prev == NULL) {
43753 + role->subj_hash[index] = match->next;
43754 + if (match->next != NULL)
43755 + match->next->prev = NULL;
43756 + } else {
43757 + match->prev->next = match->next;
43758 + if (match->next != NULL)
43759 + match->next->prev = match->prev;
43760 + }
43761 + match->prev = NULL;
43762 + match->next = NULL;
43763 + match->inode = newinode;
43764 + match->device = newdevice;
43765 + match->mode &= ~GR_DELETED;
43766 +
43767 + insert_acl_subj_label(match, role);
43768 + }
43769 +
43770 + return;
43771 +}
43772 +
43773 +static void
43774 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
43775 + const ino_t newinode, const dev_t newdevice)
43776 +{
43777 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
43778 + struct inodev_entry *match;
43779 +
43780 + match = inodev_set.i_hash[index];
43781 +
43782 + while (match && (match->nentry->inode != oldinode ||
43783 + match->nentry->device != olddevice || !match->nentry->deleted))
43784 + match = match->next;
43785 +
43786 + if (match && (match->nentry->inode == oldinode)
43787 + && (match->nentry->device == olddevice) &&
43788 + match->nentry->deleted) {
43789 + if (match->prev == NULL) {
43790 + inodev_set.i_hash[index] = match->next;
43791 + if (match->next != NULL)
43792 + match->next->prev = NULL;
43793 + } else {
43794 + match->prev->next = match->next;
43795 + if (match->next != NULL)
43796 + match->next->prev = match->prev;
43797 + }
43798 + match->prev = NULL;
43799 + match->next = NULL;
43800 + match->nentry->inode = newinode;
43801 + match->nentry->device = newdevice;
43802 + match->nentry->deleted = 0;
43803 +
43804 + insert_inodev_entry(match);
43805 + }
43806 +
43807 + return;
43808 +}
43809 +
43810 +static void
43811 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
43812 + const struct vfsmount *mnt)
43813 +{
43814 + struct acl_subject_label *subj;
43815 + struct acl_role_label *role;
43816 + unsigned int x;
43817 + ino_t ino = dentry->d_inode->i_ino;
43818 + dev_t dev = __get_dev(dentry);
43819 +
43820 + FOR_EACH_ROLE_START(role)
43821 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
43822 +
43823 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
43824 + if ((subj->inode == ino) && (subj->device == dev)) {
43825 + subj->inode = ino;
43826 + subj->device = dev;
43827 + }
43828 + FOR_EACH_NESTED_SUBJECT_END(subj)
43829 + FOR_EACH_SUBJECT_START(role, subj, x)
43830 + update_acl_obj_label(matchn->inode, matchn->device,
43831 + ino, dev, subj);
43832 + FOR_EACH_SUBJECT_END(subj,x)
43833 + FOR_EACH_ROLE_END(role)
43834 +
43835 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
43836 +
43837 + return;
43838 +}
43839 +
43840 +void
43841 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
43842 +{
43843 + struct name_entry *matchn;
43844 +
43845 + if (unlikely(!(gr_status & GR_READY)))
43846 + return;
43847 +
43848 + preempt_disable();
43849 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
43850 +
43851 + if (unlikely((unsigned long)matchn)) {
43852 + write_lock(&gr_inode_lock);
43853 + do_handle_create(matchn, dentry, mnt);
43854 + write_unlock(&gr_inode_lock);
43855 + }
43856 + preempt_enable();
43857 +
43858 + return;
43859 +}
43860 +
43861 +void
43862 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
43863 + struct dentry *old_dentry,
43864 + struct dentry *new_dentry,
43865 + struct vfsmount *mnt, const __u8 replace)
43866 +{
43867 + struct name_entry *matchn;
43868 + struct inodev_entry *inodev;
43869 + ino_t old_ino = old_dentry->d_inode->i_ino;
43870 + dev_t old_dev = __get_dev(old_dentry);
43871 +
43872 + /* vfs_rename swaps the name and parent link for old_dentry and
43873 + new_dentry
43874 + at this point, old_dentry has the new name, parent link, and inode
43875 + for the renamed file
43876 + if a file is being replaced by a rename, new_dentry has the inode
43877 + and name for the replaced file
43878 + */
43879 +
43880 + if (unlikely(!(gr_status & GR_READY)))
43881 + return;
43882 +
43883 + preempt_disable();
43884 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
43885 +
43886 + /* we wouldn't have to check d_inode if it weren't for
43887 + NFS silly-renaming
43888 + */
43889 +
43890 + write_lock(&gr_inode_lock);
43891 + if (unlikely(replace && new_dentry->d_inode)) {
43892 + ino_t new_ino = new_dentry->d_inode->i_ino;
43893 + dev_t new_dev = __get_dev(new_dentry);
43894 +
43895 + inodev = lookup_inodev_entry(new_ino, new_dev);
43896 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
43897 + do_handle_delete(inodev, new_ino, new_dev);
43898 + }
43899 +
43900 + inodev = lookup_inodev_entry(old_ino, old_dev);
43901 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
43902 + do_handle_delete(inodev, old_ino, old_dev);
43903 +
43904 + if (unlikely((unsigned long)matchn))
43905 + do_handle_create(matchn, old_dentry, mnt);
43906 +
43907 + write_unlock(&gr_inode_lock);
43908 + preempt_enable();
43909 +
43910 + return;
43911 +}
43912 +
43913 +static int
43914 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
43915 + unsigned char **sum)
43916 +{
43917 + struct acl_role_label *r;
43918 + struct role_allowed_ip *ipp;
43919 + struct role_transition *trans;
43920 + unsigned int i;
43921 + int found = 0;
43922 + u32 curr_ip = current->signal->curr_ip;
43923 +
43924 + current->signal->saved_ip = curr_ip;
43925 +
43926 + /* check transition table */
43927 +
43928 + for (trans = current->role->transitions; trans; trans = trans->next) {
43929 + if (!strcmp(rolename, trans->rolename)) {
43930 + found = 1;
43931 + break;
43932 + }
43933 + }
43934 +
43935 + if (!found)
43936 + return 0;
43937 +
43938 + /* handle special roles that do not require authentication
43939 + and check ip */
43940 +
43941 + FOR_EACH_ROLE_START(r)
43942 + if (!strcmp(rolename, r->rolename) &&
43943 + (r->roletype & GR_ROLE_SPECIAL)) {
43944 + found = 0;
43945 + if (r->allowed_ips != NULL) {
43946 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
43947 + if ((ntohl(curr_ip) & ipp->netmask) ==
43948 + (ntohl(ipp->addr) & ipp->netmask))
43949 + found = 1;
43950 + }
43951 + } else
43952 + found = 2;
43953 + if (!found)
43954 + return 0;
43955 +
43956 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
43957 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
43958 + *salt = NULL;
43959 + *sum = NULL;
43960 + return 1;
43961 + }
43962 + }
43963 + FOR_EACH_ROLE_END(r)
43964 +
43965 + for (i = 0; i < num_sprole_pws; i++) {
43966 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
43967 + *salt = acl_special_roles[i]->salt;
43968 + *sum = acl_special_roles[i]->sum;
43969 + return 1;
43970 + }
43971 + }
43972 +
43973 + return 0;
43974 +}
43975 +
43976 +static void
43977 +assign_special_role(char *rolename)
43978 +{
43979 + struct acl_object_label *obj;
43980 + struct acl_role_label *r;
43981 + struct acl_role_label *assigned = NULL;
43982 + struct task_struct *tsk;
43983 + struct file *filp;
43984 +
43985 + FOR_EACH_ROLE_START(r)
43986 + if (!strcmp(rolename, r->rolename) &&
43987 + (r->roletype & GR_ROLE_SPECIAL)) {
43988 + assigned = r;
43989 + break;
43990 + }
43991 + FOR_EACH_ROLE_END(r)
43992 +
43993 + if (!assigned)
43994 + return;
43995 +
43996 + read_lock(&tasklist_lock);
43997 + read_lock(&grsec_exec_file_lock);
43998 +
43999 + tsk = current->real_parent;
44000 + if (tsk == NULL)
44001 + goto out_unlock;
44002 +
44003 + filp = tsk->exec_file;
44004 + if (filp == NULL)
44005 + goto out_unlock;
44006 +
44007 + tsk->is_writable = 0;
44008 +
44009 + tsk->acl_sp_role = 1;
44010 + tsk->acl_role_id = ++acl_sp_role_value;
44011 + tsk->role = assigned;
44012 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44013 +
44014 + /* ignore additional mmap checks for processes that are writable
44015 + by the default ACL */
44016 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44017 + if (unlikely(obj->mode & GR_WRITE))
44018 + tsk->is_writable = 1;
44019 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44020 + if (unlikely(obj->mode & GR_WRITE))
44021 + tsk->is_writable = 1;
44022 +
44023 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44024 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44025 +#endif
44026 +
44027 +out_unlock:
44028 + read_unlock(&grsec_exec_file_lock);
44029 + read_unlock(&tasklist_lock);
44030 + return;
44031 +}
44032 +
44033 +int gr_check_secure_terminal(struct task_struct *task)
44034 +{
44035 + struct task_struct *p, *p2, *p3;
44036 + struct files_struct *files;
44037 + struct fdtable *fdt;
44038 + struct file *our_file = NULL, *file;
44039 + int i;
44040 +
44041 + if (task->signal->tty == NULL)
44042 + return 1;
44043 +
44044 + files = get_files_struct(task);
44045 + if (files != NULL) {
44046 + rcu_read_lock();
44047 + fdt = files_fdtable(files);
44048 + for (i=0; i < fdt->max_fds; i++) {
44049 + file = fcheck_files(files, i);
44050 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44051 + get_file(file);
44052 + our_file = file;
44053 + }
44054 + }
44055 + rcu_read_unlock();
44056 + put_files_struct(files);
44057 + }
44058 +
44059 + if (our_file == NULL)
44060 + return 1;
44061 +
44062 + read_lock(&tasklist_lock);
44063 + do_each_thread(p2, p) {
44064 + files = get_files_struct(p);
44065 + if (files == NULL ||
44066 + (p->signal && p->signal->tty == task->signal->tty)) {
44067 + if (files != NULL)
44068 + put_files_struct(files);
44069 + continue;
44070 + }
44071 + rcu_read_lock();
44072 + fdt = files_fdtable(files);
44073 + for (i=0; i < fdt->max_fds; i++) {
44074 + file = fcheck_files(files, i);
44075 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44076 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44077 + p3 = task;
44078 + while (p3->pid > 0) {
44079 + if (p3 == p)
44080 + break;
44081 + p3 = p3->real_parent;
44082 + }
44083 + if (p3 == p)
44084 + break;
44085 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44086 + gr_handle_alertkill(p);
44087 + rcu_read_unlock();
44088 + put_files_struct(files);
44089 + read_unlock(&tasklist_lock);
44090 + fput(our_file);
44091 + return 0;
44092 + }
44093 + }
44094 + rcu_read_unlock();
44095 + put_files_struct(files);
44096 + } while_each_thread(p2, p);
44097 + read_unlock(&tasklist_lock);
44098 +
44099 + fput(our_file);
44100 + return 1;
44101 +}
44102 +
44103 +ssize_t
44104 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44105 +{
44106 + struct gr_arg_wrapper uwrap;
44107 + unsigned char *sprole_salt = NULL;
44108 + unsigned char *sprole_sum = NULL;
44109 + int error = sizeof (struct gr_arg_wrapper);
44110 + int error2 = 0;
44111 +
44112 + mutex_lock(&gr_dev_mutex);
44113 +
44114 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44115 + error = -EPERM;
44116 + goto out;
44117 + }
44118 +
44119 + if (count != sizeof (struct gr_arg_wrapper)) {
44120 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44121 + error = -EINVAL;
44122 + goto out;
44123 + }
44124 +
44125 +
44126 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44127 + gr_auth_expires = 0;
44128 + gr_auth_attempts = 0;
44129 + }
44130 +
44131 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44132 + error = -EFAULT;
44133 + goto out;
44134 + }
44135 +
44136 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44137 + error = -EINVAL;
44138 + goto out;
44139 + }
44140 +
44141 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44142 + error = -EFAULT;
44143 + goto out;
44144 + }
44145 +
44146 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44147 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44148 + time_after(gr_auth_expires, get_seconds())) {
44149 + error = -EBUSY;
44150 + goto out;
44151 + }
44152 +
44153 + /* if non-root trying to do anything other than use a special role,
44154 + do not attempt authentication, do not count towards authentication
44155 + locking
44156 + */
44157 +
44158 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44159 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44160 + current_uid()) {
44161 + error = -EPERM;
44162 + goto out;
44163 + }
44164 +
44165 + /* ensure pw and special role name are null terminated */
44166 +
44167 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44168 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44169 +
44170 + /* Okay.
44171 + * We have our enough of the argument structure..(we have yet
44172 + * to copy_from_user the tables themselves) . Copy the tables
44173 + * only if we need them, i.e. for loading operations. */
44174 +
44175 + switch (gr_usermode->mode) {
44176 + case GR_STATUS:
44177 + if (gr_status & GR_READY) {
44178 + error = 1;
44179 + if (!gr_check_secure_terminal(current))
44180 + error = 3;
44181 + } else
44182 + error = 2;
44183 + goto out;
44184 + case GR_SHUTDOWN:
44185 + if ((gr_status & GR_READY)
44186 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44187 + pax_open_kernel();
44188 + gr_status &= ~GR_READY;
44189 + pax_close_kernel();
44190 +
44191 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44192 + free_variables();
44193 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44194 + memset(gr_system_salt, 0, GR_SALT_LEN);
44195 + memset(gr_system_sum, 0, GR_SHA_LEN);
44196 + } else if (gr_status & GR_READY) {
44197 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44198 + error = -EPERM;
44199 + } else {
44200 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44201 + error = -EAGAIN;
44202 + }
44203 + break;
44204 + case GR_ENABLE:
44205 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44206 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44207 + else {
44208 + if (gr_status & GR_READY)
44209 + error = -EAGAIN;
44210 + else
44211 + error = error2;
44212 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44213 + }
44214 + break;
44215 + case GR_RELOAD:
44216 + if (!(gr_status & GR_READY)) {
44217 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44218 + error = -EAGAIN;
44219 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44220 + preempt_disable();
44221 +
44222 + pax_open_kernel();
44223 + gr_status &= ~GR_READY;
44224 + pax_close_kernel();
44225 +
44226 + free_variables();
44227 + if (!(error2 = gracl_init(gr_usermode))) {
44228 + preempt_enable();
44229 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44230 + } else {
44231 + preempt_enable();
44232 + error = error2;
44233 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44234 + }
44235 + } else {
44236 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44237 + error = -EPERM;
44238 + }
44239 + break;
44240 + case GR_SEGVMOD:
44241 + if (unlikely(!(gr_status & GR_READY))) {
44242 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44243 + error = -EAGAIN;
44244 + break;
44245 + }
44246 +
44247 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44248 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44249 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44250 + struct acl_subject_label *segvacl;
44251 + segvacl =
44252 + lookup_acl_subj_label(gr_usermode->segv_inode,
44253 + gr_usermode->segv_device,
44254 + current->role);
44255 + if (segvacl) {
44256 + segvacl->crashes = 0;
44257 + segvacl->expires = 0;
44258 + }
44259 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44260 + gr_remove_uid(gr_usermode->segv_uid);
44261 + }
44262 + } else {
44263 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44264 + error = -EPERM;
44265 + }
44266 + break;
44267 + case GR_SPROLE:
44268 + case GR_SPROLEPAM:
44269 + if (unlikely(!(gr_status & GR_READY))) {
44270 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44271 + error = -EAGAIN;
44272 + break;
44273 + }
44274 +
44275 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44276 + current->role->expires = 0;
44277 + current->role->auth_attempts = 0;
44278 + }
44279 +
44280 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44281 + time_after(current->role->expires, get_seconds())) {
44282 + error = -EBUSY;
44283 + goto out;
44284 + }
44285 +
44286 + if (lookup_special_role_auth
44287 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44288 + && ((!sprole_salt && !sprole_sum)
44289 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44290 + char *p = "";
44291 + assign_special_role(gr_usermode->sp_role);
44292 + read_lock(&tasklist_lock);
44293 + if (current->real_parent)
44294 + p = current->real_parent->role->rolename;
44295 + read_unlock(&tasklist_lock);
44296 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44297 + p, acl_sp_role_value);
44298 + } else {
44299 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44300 + error = -EPERM;
44301 + if(!(current->role->auth_attempts++))
44302 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44303 +
44304 + goto out;
44305 + }
44306 + break;
44307 + case GR_UNSPROLE:
44308 + if (unlikely(!(gr_status & GR_READY))) {
44309 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44310 + error = -EAGAIN;
44311 + break;
44312 + }
44313 +
44314 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44315 + char *p = "";
44316 + int i = 0;
44317 +
44318 + read_lock(&tasklist_lock);
44319 + if (current->real_parent) {
44320 + p = current->real_parent->role->rolename;
44321 + i = current->real_parent->acl_role_id;
44322 + }
44323 + read_unlock(&tasklist_lock);
44324 +
44325 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44326 + gr_set_acls(1);
44327 + } else {
44328 + error = -EPERM;
44329 + goto out;
44330 + }
44331 + break;
44332 + default:
44333 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44334 + error = -EINVAL;
44335 + break;
44336 + }
44337 +
44338 + if (error != -EPERM)
44339 + goto out;
44340 +
44341 + if(!(gr_auth_attempts++))
44342 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44343 +
44344 + out:
44345 + mutex_unlock(&gr_dev_mutex);
44346 + return error;
44347 +}
44348 +
44349 +/* must be called with
44350 + rcu_read_lock();
44351 + read_lock(&tasklist_lock);
44352 + read_lock(&grsec_exec_file_lock);
44353 +*/
44354 +int gr_apply_subject_to_task(struct task_struct *task)
44355 +{
44356 + struct acl_object_label *obj;
44357 + char *tmpname;
44358 + struct acl_subject_label *tmpsubj;
44359 + struct file *filp;
44360 + struct name_entry *nmatch;
44361 +
44362 + filp = task->exec_file;
44363 + if (filp == NULL)
44364 + return 0;
44365 +
44366 + /* the following is to apply the correct subject
44367 + on binaries running when the RBAC system
44368 + is enabled, when the binaries have been
44369 + replaced or deleted since their execution
44370 + -----
44371 + when the RBAC system starts, the inode/dev
44372 + from exec_file will be one the RBAC system
44373 + is unaware of. It only knows the inode/dev
44374 + of the present file on disk, or the absence
44375 + of it.
44376 + */
44377 + preempt_disable();
44378 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44379 +
44380 + nmatch = lookup_name_entry(tmpname);
44381 + preempt_enable();
44382 + tmpsubj = NULL;
44383 + if (nmatch) {
44384 + if (nmatch->deleted)
44385 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44386 + else
44387 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44388 + if (tmpsubj != NULL)
44389 + task->acl = tmpsubj;
44390 + }
44391 + if (tmpsubj == NULL)
44392 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44393 + task->role);
44394 + if (task->acl) {
44395 + task->is_writable = 0;
44396 + /* ignore additional mmap checks for processes that are writable
44397 + by the default ACL */
44398 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44399 + if (unlikely(obj->mode & GR_WRITE))
44400 + task->is_writable = 1;
44401 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44402 + if (unlikely(obj->mode & GR_WRITE))
44403 + task->is_writable = 1;
44404 +
44405 + gr_set_proc_res(task);
44406 +
44407 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44408 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44409 +#endif
44410 + } else {
44411 + return 1;
44412 + }
44413 +
44414 + return 0;
44415 +}
44416 +
44417 +int
44418 +gr_set_acls(const int type)
44419 +{
44420 + struct task_struct *task, *task2;
44421 + struct acl_role_label *role = current->role;
44422 + __u16 acl_role_id = current->acl_role_id;
44423 + const struct cred *cred;
44424 + int ret;
44425 +
44426 + rcu_read_lock();
44427 + read_lock(&tasklist_lock);
44428 + read_lock(&grsec_exec_file_lock);
44429 + do_each_thread(task2, task) {
44430 + /* check to see if we're called from the exit handler,
44431 + if so, only replace ACLs that have inherited the admin
44432 + ACL */
44433 +
44434 + if (type && (task->role != role ||
44435 + task->acl_role_id != acl_role_id))
44436 + continue;
44437 +
44438 + task->acl_role_id = 0;
44439 + task->acl_sp_role = 0;
44440 +
44441 + if (task->exec_file) {
44442 + cred = __task_cred(task);
44443 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44444 + ret = gr_apply_subject_to_task(task);
44445 + if (ret) {
44446 + read_unlock(&grsec_exec_file_lock);
44447 + read_unlock(&tasklist_lock);
44448 + rcu_read_unlock();
44449 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44450 + return ret;
44451 + }
44452 + } else {
44453 + // it's a kernel process
44454 + task->role = kernel_role;
44455 + task->acl = kernel_role->root_label;
44456 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44457 + task->acl->mode &= ~GR_PROCFIND;
44458 +#endif
44459 + }
44460 + } while_each_thread(task2, task);
44461 + read_unlock(&grsec_exec_file_lock);
44462 + read_unlock(&tasklist_lock);
44463 + rcu_read_unlock();
44464 +
44465 + return 0;
44466 +}
44467 +
44468 +void
44469 +gr_learn_resource(const struct task_struct *task,
44470 + const int res, const unsigned long wanted, const int gt)
44471 +{
44472 + struct acl_subject_label *acl;
44473 + const struct cred *cred;
44474 +
44475 + if (unlikely((gr_status & GR_READY) &&
44476 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44477 + goto skip_reslog;
44478 +
44479 +#ifdef CONFIG_GRKERNSEC_RESLOG
44480 + gr_log_resource(task, res, wanted, gt);
44481 +#endif
44482 + skip_reslog:
44483 +
44484 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44485 + return;
44486 +
44487 + acl = task->acl;
44488 +
44489 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44490 + !(acl->resmask & (1 << (unsigned short) res))))
44491 + return;
44492 +
44493 + if (wanted >= acl->res[res].rlim_cur) {
44494 + unsigned long res_add;
44495 +
44496 + res_add = wanted;
44497 + switch (res) {
44498 + case RLIMIT_CPU:
44499 + res_add += GR_RLIM_CPU_BUMP;
44500 + break;
44501 + case RLIMIT_FSIZE:
44502 + res_add += GR_RLIM_FSIZE_BUMP;
44503 + break;
44504 + case RLIMIT_DATA:
44505 + res_add += GR_RLIM_DATA_BUMP;
44506 + break;
44507 + case RLIMIT_STACK:
44508 + res_add += GR_RLIM_STACK_BUMP;
44509 + break;
44510 + case RLIMIT_CORE:
44511 + res_add += GR_RLIM_CORE_BUMP;
44512 + break;
44513 + case RLIMIT_RSS:
44514 + res_add += GR_RLIM_RSS_BUMP;
44515 + break;
44516 + case RLIMIT_NPROC:
44517 + res_add += GR_RLIM_NPROC_BUMP;
44518 + break;
44519 + case RLIMIT_NOFILE:
44520 + res_add += GR_RLIM_NOFILE_BUMP;
44521 + break;
44522 + case RLIMIT_MEMLOCK:
44523 + res_add += GR_RLIM_MEMLOCK_BUMP;
44524 + break;
44525 + case RLIMIT_AS:
44526 + res_add += GR_RLIM_AS_BUMP;
44527 + break;
44528 + case RLIMIT_LOCKS:
44529 + res_add += GR_RLIM_LOCKS_BUMP;
44530 + break;
44531 + case RLIMIT_SIGPENDING:
44532 + res_add += GR_RLIM_SIGPENDING_BUMP;
44533 + break;
44534 + case RLIMIT_MSGQUEUE:
44535 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44536 + break;
44537 + case RLIMIT_NICE:
44538 + res_add += GR_RLIM_NICE_BUMP;
44539 + break;
44540 + case RLIMIT_RTPRIO:
44541 + res_add += GR_RLIM_RTPRIO_BUMP;
44542 + break;
44543 + case RLIMIT_RTTIME:
44544 + res_add += GR_RLIM_RTTIME_BUMP;
44545 + break;
44546 + }
44547 +
44548 + acl->res[res].rlim_cur = res_add;
44549 +
44550 + if (wanted > acl->res[res].rlim_max)
44551 + acl->res[res].rlim_max = res_add;
44552 +
44553 + /* only log the subject filename, since resource logging is supported for
44554 + single-subject learning only */
44555 + rcu_read_lock();
44556 + cred = __task_cred(task);
44557 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44558 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44559 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44560 + "", (unsigned long) res, &task->signal->saved_ip);
44561 + rcu_read_unlock();
44562 + }
44563 +
44564 + return;
44565 +}
44566 +
44567 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44568 +void
44569 +pax_set_initial_flags(struct linux_binprm *bprm)
44570 +{
44571 + struct task_struct *task = current;
44572 + struct acl_subject_label *proc;
44573 + unsigned long flags;
44574 +
44575 + if (unlikely(!(gr_status & GR_READY)))
44576 + return;
44577 +
44578 + flags = pax_get_flags(task);
44579 +
44580 + proc = task->acl;
44581 +
44582 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44583 + flags &= ~MF_PAX_PAGEEXEC;
44584 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44585 + flags &= ~MF_PAX_SEGMEXEC;
44586 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44587 + flags &= ~MF_PAX_RANDMMAP;
44588 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44589 + flags &= ~MF_PAX_EMUTRAMP;
44590 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44591 + flags &= ~MF_PAX_MPROTECT;
44592 +
44593 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44594 + flags |= MF_PAX_PAGEEXEC;
44595 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44596 + flags |= MF_PAX_SEGMEXEC;
44597 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44598 + flags |= MF_PAX_RANDMMAP;
44599 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44600 + flags |= MF_PAX_EMUTRAMP;
44601 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44602 + flags |= MF_PAX_MPROTECT;
44603 +
44604 + pax_set_flags(task, flags);
44605 +
44606 + return;
44607 +}
44608 +#endif
44609 +
44610 +#ifdef CONFIG_SYSCTL
44611 +/* Eric Biederman likes breaking userland ABI and every inode-based security
44612 + system to save 35kb of memory */
44613 +
44614 +/* we modify the passed in filename, but adjust it back before returning */
44615 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44616 +{
44617 + struct name_entry *nmatch;
44618 + char *p, *lastp = NULL;
44619 + struct acl_object_label *obj = NULL, *tmp;
44620 + struct acl_subject_label *tmpsubj;
44621 + char c = '\0';
44622 +
44623 + read_lock(&gr_inode_lock);
44624 +
44625 + p = name + len - 1;
44626 + do {
44627 + nmatch = lookup_name_entry(name);
44628 + if (lastp != NULL)
44629 + *lastp = c;
44630 +
44631 + if (nmatch == NULL)
44632 + goto next_component;
44633 + tmpsubj = current->acl;
44634 + do {
44635 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44636 + if (obj != NULL) {
44637 + tmp = obj->globbed;
44638 + while (tmp) {
44639 + if (!glob_match(tmp->filename, name)) {
44640 + obj = tmp;
44641 + goto found_obj;
44642 + }
44643 + tmp = tmp->next;
44644 + }
44645 + goto found_obj;
44646 + }
44647 + } while ((tmpsubj = tmpsubj->parent_subject));
44648 +next_component:
44649 + /* end case */
44650 + if (p == name)
44651 + break;
44652 +
44653 + while (*p != '/')
44654 + p--;
44655 + if (p == name)
44656 + lastp = p + 1;
44657 + else {
44658 + lastp = p;
44659 + p--;
44660 + }
44661 + c = *lastp;
44662 + *lastp = '\0';
44663 + } while (1);
44664 +found_obj:
44665 + read_unlock(&gr_inode_lock);
44666 + /* obj returned will always be non-null */
44667 + return obj;
44668 +}
44669 +
44670 +/* returns 0 when allowing, non-zero on error
44671 + op of 0 is used for readdir, so we don't log the names of hidden files
44672 +*/
44673 +__u32
44674 +gr_handle_sysctl(const struct ctl_table *table, const int op)
44675 +{
44676 + struct ctl_table *tmp;
44677 + const char *proc_sys = "/proc/sys";
44678 + char *path;
44679 + struct acl_object_label *obj;
44680 + unsigned short len = 0, pos = 0, depth = 0, i;
44681 + __u32 err = 0;
44682 + __u32 mode = 0;
44683 +
44684 + if (unlikely(!(gr_status & GR_READY)))
44685 + return 0;
44686 +
44687 + /* for now, ignore operations on non-sysctl entries if it's not a
44688 + readdir*/
44689 + if (table->child != NULL && op != 0)
44690 + return 0;
44691 +
44692 + mode |= GR_FIND;
44693 + /* it's only a read if it's an entry, read on dirs is for readdir */
44694 + if (op & MAY_READ)
44695 + mode |= GR_READ;
44696 + if (op & MAY_WRITE)
44697 + mode |= GR_WRITE;
44698 +
44699 + preempt_disable();
44700 +
44701 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
44702 +
44703 + /* it's only a read/write if it's an actual entry, not a dir
44704 + (which are opened for readdir)
44705 + */
44706 +
44707 + /* convert the requested sysctl entry into a pathname */
44708 +
44709 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44710 + len += strlen(tmp->procname);
44711 + len++;
44712 + depth++;
44713 + }
44714 +
44715 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
44716 + /* deny */
44717 + goto out;
44718 + }
44719 +
44720 + memset(path, 0, PAGE_SIZE);
44721 +
44722 + memcpy(path, proc_sys, strlen(proc_sys));
44723 +
44724 + pos += strlen(proc_sys);
44725 +
44726 + for (; depth > 0; depth--) {
44727 + path[pos] = '/';
44728 + pos++;
44729 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44730 + if (depth == i) {
44731 + memcpy(path + pos, tmp->procname,
44732 + strlen(tmp->procname));
44733 + pos += strlen(tmp->procname);
44734 + }
44735 + i++;
44736 + }
44737 + }
44738 +
44739 + obj = gr_lookup_by_name(path, pos);
44740 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
44741 +
44742 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
44743 + ((err & mode) != mode))) {
44744 + __u32 new_mode = mode;
44745 +
44746 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
44747 +
44748 + err = 0;
44749 + gr_log_learn_sysctl(path, new_mode);
44750 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
44751 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
44752 + err = -ENOENT;
44753 + } else if (!(err & GR_FIND)) {
44754 + err = -ENOENT;
44755 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
44756 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
44757 + path, (mode & GR_READ) ? " reading" : "",
44758 + (mode & GR_WRITE) ? " writing" : "");
44759 + err = -EACCES;
44760 + } else if ((err & mode) != mode) {
44761 + err = -EACCES;
44762 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
44763 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
44764 + path, (mode & GR_READ) ? " reading" : "",
44765 + (mode & GR_WRITE) ? " writing" : "");
44766 + err = 0;
44767 + } else
44768 + err = 0;
44769 +
44770 + out:
44771 + preempt_enable();
44772 +
44773 + return err;
44774 +}
44775 +#endif
44776 +
44777 +int
44778 +gr_handle_proc_ptrace(struct task_struct *task)
44779 +{
44780 + struct file *filp;
44781 + struct task_struct *tmp = task;
44782 + struct task_struct *curtemp = current;
44783 + __u32 retmode;
44784 +
44785 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
44786 + if (unlikely(!(gr_status & GR_READY)))
44787 + return 0;
44788 +#endif
44789 +
44790 + read_lock(&tasklist_lock);
44791 + read_lock(&grsec_exec_file_lock);
44792 + filp = task->exec_file;
44793 +
44794 + while (tmp->pid > 0) {
44795 + if (tmp == curtemp)
44796 + break;
44797 + tmp = tmp->real_parent;
44798 + }
44799 +
44800 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
44801 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
44802 + read_unlock(&grsec_exec_file_lock);
44803 + read_unlock(&tasklist_lock);
44804 + return 1;
44805 + }
44806 +
44807 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
44808 + if (!(gr_status & GR_READY)) {
44809 + read_unlock(&grsec_exec_file_lock);
44810 + read_unlock(&tasklist_lock);
44811 + return 0;
44812 + }
44813 +#endif
44814 +
44815 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
44816 + read_unlock(&grsec_exec_file_lock);
44817 + read_unlock(&tasklist_lock);
44818 +
44819 + if (retmode & GR_NOPTRACE)
44820 + return 1;
44821 +
44822 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
44823 + && (current->acl != task->acl || (current->acl != current->role->root_label
44824 + && current->pid != task->pid)))
44825 + return 1;
44826 +
44827 + return 0;
44828 +}
44829 +
44830 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
44831 +{
44832 + if (unlikely(!(gr_status & GR_READY)))
44833 + return;
44834 +
44835 + if (!(current->role->roletype & GR_ROLE_GOD))
44836 + return;
44837 +
44838 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
44839 + p->role->rolename, gr_task_roletype_to_char(p),
44840 + p->acl->filename);
44841 +}
44842 +
44843 +int
44844 +gr_handle_ptrace(struct task_struct *task, const long request)
44845 +{
44846 + struct task_struct *tmp = task;
44847 + struct task_struct *curtemp = current;
44848 + __u32 retmode;
44849 +
44850 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
44851 + if (unlikely(!(gr_status & GR_READY)))
44852 + return 0;
44853 +#endif
44854 +
44855 + read_lock(&tasklist_lock);
44856 + while (tmp->pid > 0) {
44857 + if (tmp == curtemp)
44858 + break;
44859 + tmp = tmp->real_parent;
44860 + }
44861 +
44862 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
44863 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
44864 + read_unlock(&tasklist_lock);
44865 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
44866 + return 1;
44867 + }
44868 + read_unlock(&tasklist_lock);
44869 +
44870 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
44871 + if (!(gr_status & GR_READY))
44872 + return 0;
44873 +#endif
44874 +
44875 + read_lock(&grsec_exec_file_lock);
44876 + if (unlikely(!task->exec_file)) {
44877 + read_unlock(&grsec_exec_file_lock);
44878 + return 0;
44879 + }
44880 +
44881 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
44882 + read_unlock(&grsec_exec_file_lock);
44883 +
44884 + if (retmode & GR_NOPTRACE) {
44885 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
44886 + return 1;
44887 + }
44888 +
44889 + if (retmode & GR_PTRACERD) {
44890 + switch (request) {
44891 + case PTRACE_POKETEXT:
44892 + case PTRACE_POKEDATA:
44893 + case PTRACE_POKEUSR:
44894 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
44895 + case PTRACE_SETREGS:
44896 + case PTRACE_SETFPREGS:
44897 +#endif
44898 +#ifdef CONFIG_X86
44899 + case PTRACE_SETFPXREGS:
44900 +#endif
44901 +#ifdef CONFIG_ALTIVEC
44902 + case PTRACE_SETVRREGS:
44903 +#endif
44904 + return 1;
44905 + default:
44906 + return 0;
44907 + }
44908 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
44909 + !(current->role->roletype & GR_ROLE_GOD) &&
44910 + (current->acl != task->acl)) {
44911 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
44912 + return 1;
44913 + }
44914 +
44915 + return 0;
44916 +}
44917 +
44918 +static int is_writable_mmap(const struct file *filp)
44919 +{
44920 + struct task_struct *task = current;
44921 + struct acl_object_label *obj, *obj2;
44922 +
44923 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
44924 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
44925 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44926 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
44927 + task->role->root_label);
44928 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
44929 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
44930 + return 1;
44931 + }
44932 + }
44933 + return 0;
44934 +}
44935 +
44936 +int
44937 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
44938 +{
44939 + __u32 mode;
44940 +
44941 + if (unlikely(!file || !(prot & PROT_EXEC)))
44942 + return 1;
44943 +
44944 + if (is_writable_mmap(file))
44945 + return 0;
44946 +
44947 + mode =
44948 + gr_search_file(file->f_path.dentry,
44949 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
44950 + file->f_path.mnt);
44951 +
44952 + if (!gr_tpe_allow(file))
44953 + return 0;
44954 +
44955 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
44956 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
44957 + return 0;
44958 + } else if (unlikely(!(mode & GR_EXEC))) {
44959 + return 0;
44960 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
44961 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
44962 + return 1;
44963 + }
44964 +
44965 + return 1;
44966 +}
44967 +
44968 +int
44969 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
44970 +{
44971 + __u32 mode;
44972 +
44973 + if (unlikely(!file || !(prot & PROT_EXEC)))
44974 + return 1;
44975 +
44976 + if (is_writable_mmap(file))
44977 + return 0;
44978 +
44979 + mode =
44980 + gr_search_file(file->f_path.dentry,
44981 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
44982 + file->f_path.mnt);
44983 +
44984 + if (!gr_tpe_allow(file))
44985 + return 0;
44986 +
44987 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
44988 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
44989 + return 0;
44990 + } else if (unlikely(!(mode & GR_EXEC))) {
44991 + return 0;
44992 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
44993 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
44994 + return 1;
44995 + }
44996 +
44997 + return 1;
44998 +}
44999 +
45000 +void
45001 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45002 +{
45003 + unsigned long runtime;
45004 + unsigned long cputime;
45005 + unsigned int wday, cday;
45006 + __u8 whr, chr;
45007 + __u8 wmin, cmin;
45008 + __u8 wsec, csec;
45009 + struct timespec timeval;
45010 +
45011 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45012 + !(task->acl->mode & GR_PROCACCT)))
45013 + return;
45014 +
45015 + do_posix_clock_monotonic_gettime(&timeval);
45016 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45017 + wday = runtime / (3600 * 24);
45018 + runtime -= wday * (3600 * 24);
45019 + whr = runtime / 3600;
45020 + runtime -= whr * 3600;
45021 + wmin = runtime / 60;
45022 + runtime -= wmin * 60;
45023 + wsec = runtime;
45024 +
45025 + cputime = (task->utime + task->stime) / HZ;
45026 + cday = cputime / (3600 * 24);
45027 + cputime -= cday * (3600 * 24);
45028 + chr = cputime / 3600;
45029 + cputime -= chr * 3600;
45030 + cmin = cputime / 60;
45031 + cputime -= cmin * 60;
45032 + csec = cputime;
45033 +
45034 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45035 +
45036 + return;
45037 +}
45038 +
45039 +void gr_set_kernel_label(struct task_struct *task)
45040 +{
45041 + if (gr_status & GR_READY) {
45042 + task->role = kernel_role;
45043 + task->acl = kernel_role->root_label;
45044 + }
45045 + return;
45046 +}
45047 +
45048 +#ifdef CONFIG_TASKSTATS
45049 +int gr_is_taskstats_denied(int pid)
45050 +{
45051 + struct task_struct *task;
45052 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45053 + const struct cred *cred;
45054 +#endif
45055 + int ret = 0;
45056 +
45057 + /* restrict taskstats viewing to un-chrooted root users
45058 + who have the 'view' subject flag if the RBAC system is enabled
45059 + */
45060 +
45061 + rcu_read_lock();
45062 + read_lock(&tasklist_lock);
45063 + task = find_task_by_vpid(pid);
45064 + if (task) {
45065 +#ifdef CONFIG_GRKERNSEC_CHROOT
45066 + if (proc_is_chrooted(task))
45067 + ret = -EACCES;
45068 +#endif
45069 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45070 + cred = __task_cred(task);
45071 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45072 + if (cred->uid != 0)
45073 + ret = -EACCES;
45074 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45075 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45076 + ret = -EACCES;
45077 +#endif
45078 +#endif
45079 + if (gr_status & GR_READY) {
45080 + if (!(task->acl->mode & GR_VIEW))
45081 + ret = -EACCES;
45082 + }
45083 + } else
45084 + ret = -ENOENT;
45085 +
45086 + read_unlock(&tasklist_lock);
45087 + rcu_read_unlock();
45088 +
45089 + return ret;
45090 +}
45091 +#endif
45092 +
45093 +/* AUXV entries are filled via a descendant of search_binary_handler
45094 + after we've already applied the subject for the target
45095 +*/
45096 +int gr_acl_enable_at_secure(void)
45097 +{
45098 + if (unlikely(!(gr_status & GR_READY)))
45099 + return 0;
45100 +
45101 + if (current->acl->mode & GR_ATSECURE)
45102 + return 1;
45103 +
45104 + return 0;
45105 +}
45106 +
45107 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45108 +{
45109 + struct task_struct *task = current;
45110 + struct dentry *dentry = file->f_path.dentry;
45111 + struct vfsmount *mnt = file->f_path.mnt;
45112 + struct acl_object_label *obj, *tmp;
45113 + struct acl_subject_label *subj;
45114 + unsigned int bufsize;
45115 + int is_not_root;
45116 + char *path;
45117 + dev_t dev = __get_dev(dentry);
45118 +
45119 + if (unlikely(!(gr_status & GR_READY)))
45120 + return 1;
45121 +
45122 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45123 + return 1;
45124 +
45125 + /* ignore Eric Biederman */
45126 + if (IS_PRIVATE(dentry->d_inode))
45127 + return 1;
45128 +
45129 + subj = task->acl;
45130 + do {
45131 + obj = lookup_acl_obj_label(ino, dev, subj);
45132 + if (obj != NULL)
45133 + return (obj->mode & GR_FIND) ? 1 : 0;
45134 + } while ((subj = subj->parent_subject));
45135 +
45136 + /* this is purely an optimization since we're looking for an object
45137 + for the directory we're doing a readdir on
45138 + if it's possible for any globbed object to match the entry we're
45139 + filling into the directory, then the object we find here will be
45140 + an anchor point with attached globbed objects
45141 + */
45142 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45143 + if (obj->globbed == NULL)
45144 + return (obj->mode & GR_FIND) ? 1 : 0;
45145 +
45146 + is_not_root = ((obj->filename[0] == '/') &&
45147 + (obj->filename[1] == '\0')) ? 0 : 1;
45148 + bufsize = PAGE_SIZE - namelen - is_not_root;
45149 +
45150 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45151 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45152 + return 1;
45153 +
45154 + preempt_disable();
45155 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45156 + bufsize);
45157 +
45158 + bufsize = strlen(path);
45159 +
45160 + /* if base is "/", don't append an additional slash */
45161 + if (is_not_root)
45162 + *(path + bufsize) = '/';
45163 + memcpy(path + bufsize + is_not_root, name, namelen);
45164 + *(path + bufsize + namelen + is_not_root) = '\0';
45165 +
45166 + tmp = obj->globbed;
45167 + while (tmp) {
45168 + if (!glob_match(tmp->filename, path)) {
45169 + preempt_enable();
45170 + return (tmp->mode & GR_FIND) ? 1 : 0;
45171 + }
45172 + tmp = tmp->next;
45173 + }
45174 + preempt_enable();
45175 + return (obj->mode & GR_FIND) ? 1 : 0;
45176 +}
45177 +
45178 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45179 +EXPORT_SYMBOL(gr_acl_is_enabled);
45180 +#endif
45181 +EXPORT_SYMBOL(gr_learn_resource);
45182 +EXPORT_SYMBOL(gr_set_kernel_label);
45183 +#ifdef CONFIG_SECURITY
45184 +EXPORT_SYMBOL(gr_check_user_change);
45185 +EXPORT_SYMBOL(gr_check_group_change);
45186 +#endif
45187 +
45188 diff -urNp linux-3.0.3/grsecurity/gracl_cap.c linux-3.0.3/grsecurity/gracl_cap.c
45189 --- linux-3.0.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45190 +++ linux-3.0.3/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45191 @@ -0,0 +1,139 @@
45192 +#include <linux/kernel.h>
45193 +#include <linux/module.h>
45194 +#include <linux/sched.h>
45195 +#include <linux/gracl.h>
45196 +#include <linux/grsecurity.h>
45197 +#include <linux/grinternal.h>
45198 +
45199 +static const char *captab_log[] = {
45200 + "CAP_CHOWN",
45201 + "CAP_DAC_OVERRIDE",
45202 + "CAP_DAC_READ_SEARCH",
45203 + "CAP_FOWNER",
45204 + "CAP_FSETID",
45205 + "CAP_KILL",
45206 + "CAP_SETGID",
45207 + "CAP_SETUID",
45208 + "CAP_SETPCAP",
45209 + "CAP_LINUX_IMMUTABLE",
45210 + "CAP_NET_BIND_SERVICE",
45211 + "CAP_NET_BROADCAST",
45212 + "CAP_NET_ADMIN",
45213 + "CAP_NET_RAW",
45214 + "CAP_IPC_LOCK",
45215 + "CAP_IPC_OWNER",
45216 + "CAP_SYS_MODULE",
45217 + "CAP_SYS_RAWIO",
45218 + "CAP_SYS_CHROOT",
45219 + "CAP_SYS_PTRACE",
45220 + "CAP_SYS_PACCT",
45221 + "CAP_SYS_ADMIN",
45222 + "CAP_SYS_BOOT",
45223 + "CAP_SYS_NICE",
45224 + "CAP_SYS_RESOURCE",
45225 + "CAP_SYS_TIME",
45226 + "CAP_SYS_TTY_CONFIG",
45227 + "CAP_MKNOD",
45228 + "CAP_LEASE",
45229 + "CAP_AUDIT_WRITE",
45230 + "CAP_AUDIT_CONTROL",
45231 + "CAP_SETFCAP",
45232 + "CAP_MAC_OVERRIDE",
45233 + "CAP_MAC_ADMIN",
45234 + "CAP_SYSLOG"
45235 +};
45236 +
45237 +EXPORT_SYMBOL(gr_is_capable);
45238 +EXPORT_SYMBOL(gr_is_capable_nolog);
45239 +
45240 +int
45241 +gr_is_capable(const int cap)
45242 +{
45243 + struct task_struct *task = current;
45244 + const struct cred *cred = current_cred();
45245 + struct acl_subject_label *curracl;
45246 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45247 + kernel_cap_t cap_audit = __cap_empty_set;
45248 +
45249 + if (!gr_acl_is_enabled())
45250 + return 1;
45251 +
45252 + curracl = task->acl;
45253 +
45254 + cap_drop = curracl->cap_lower;
45255 + cap_mask = curracl->cap_mask;
45256 + cap_audit = curracl->cap_invert_audit;
45257 +
45258 + while ((curracl = curracl->parent_subject)) {
45259 + /* if the cap isn't specified in the current computed mask but is specified in the
45260 + current level subject, and is lowered in the current level subject, then add
45261 + it to the set of dropped capabilities
45262 + otherwise, add the current level subject's mask to the current computed mask
45263 + */
45264 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45265 + cap_raise(cap_mask, cap);
45266 + if (cap_raised(curracl->cap_lower, cap))
45267 + cap_raise(cap_drop, cap);
45268 + if (cap_raised(curracl->cap_invert_audit, cap))
45269 + cap_raise(cap_audit, cap);
45270 + }
45271 + }
45272 +
45273 + if (!cap_raised(cap_drop, cap)) {
45274 + if (cap_raised(cap_audit, cap))
45275 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45276 + return 1;
45277 + }
45278 +
45279 + curracl = task->acl;
45280 +
45281 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45282 + && cap_raised(cred->cap_effective, cap)) {
45283 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45284 + task->role->roletype, cred->uid,
45285 + cred->gid, task->exec_file ?
45286 + gr_to_filename(task->exec_file->f_path.dentry,
45287 + task->exec_file->f_path.mnt) : curracl->filename,
45288 + curracl->filename, 0UL,
45289 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45290 + return 1;
45291 + }
45292 +
45293 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45294 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45295 + return 0;
45296 +}
45297 +
45298 +int
45299 +gr_is_capable_nolog(const int cap)
45300 +{
45301 + struct acl_subject_label *curracl;
45302 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45303 +
45304 + if (!gr_acl_is_enabled())
45305 + return 1;
45306 +
45307 + curracl = current->acl;
45308 +
45309 + cap_drop = curracl->cap_lower;
45310 + cap_mask = curracl->cap_mask;
45311 +
45312 + while ((curracl = curracl->parent_subject)) {
45313 + /* if the cap isn't specified in the current computed mask but is specified in the
45314 + current level subject, and is lowered in the current level subject, then add
45315 + it to the set of dropped capabilities
45316 + otherwise, add the current level subject's mask to the current computed mask
45317 + */
45318 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45319 + cap_raise(cap_mask, cap);
45320 + if (cap_raised(curracl->cap_lower, cap))
45321 + cap_raise(cap_drop, cap);
45322 + }
45323 + }
45324 +
45325 + if (!cap_raised(cap_drop, cap))
45326 + return 1;
45327 +
45328 + return 0;
45329 +}
45330 +
45331 diff -urNp linux-3.0.3/grsecurity/gracl_fs.c linux-3.0.3/grsecurity/gracl_fs.c
45332 --- linux-3.0.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45333 +++ linux-3.0.3/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45334 @@ -0,0 +1,431 @@
45335 +#include <linux/kernel.h>
45336 +#include <linux/sched.h>
45337 +#include <linux/types.h>
45338 +#include <linux/fs.h>
45339 +#include <linux/file.h>
45340 +#include <linux/stat.h>
45341 +#include <linux/grsecurity.h>
45342 +#include <linux/grinternal.h>
45343 +#include <linux/gracl.h>
45344 +
45345 +__u32
45346 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45347 + const struct vfsmount * mnt)
45348 +{
45349 + __u32 mode;
45350 +
45351 + if (unlikely(!dentry->d_inode))
45352 + return GR_FIND;
45353 +
45354 + mode =
45355 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45356 +
45357 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45358 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45359 + return mode;
45360 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45361 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45362 + return 0;
45363 + } else if (unlikely(!(mode & GR_FIND)))
45364 + return 0;
45365 +
45366 + return GR_FIND;
45367 +}
45368 +
45369 +__u32
45370 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45371 + const int fmode)
45372 +{
45373 + __u32 reqmode = GR_FIND;
45374 + __u32 mode;
45375 +
45376 + if (unlikely(!dentry->d_inode))
45377 + return reqmode;
45378 +
45379 + if (unlikely(fmode & O_APPEND))
45380 + reqmode |= GR_APPEND;
45381 + else if (unlikely(fmode & FMODE_WRITE))
45382 + reqmode |= GR_WRITE;
45383 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45384 + reqmode |= GR_READ;
45385 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45386 + reqmode &= ~GR_READ;
45387 + mode =
45388 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45389 + mnt);
45390 +
45391 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45392 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45393 + reqmode & GR_READ ? " reading" : "",
45394 + reqmode & GR_WRITE ? " writing" : reqmode &
45395 + GR_APPEND ? " appending" : "");
45396 + return reqmode;
45397 + } else
45398 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45399 + {
45400 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45401 + reqmode & GR_READ ? " reading" : "",
45402 + reqmode & GR_WRITE ? " writing" : reqmode &
45403 + GR_APPEND ? " appending" : "");
45404 + return 0;
45405 + } else if (unlikely((mode & reqmode) != reqmode))
45406 + return 0;
45407 +
45408 + return reqmode;
45409 +}
45410 +
45411 +__u32
45412 +gr_acl_handle_creat(const struct dentry * dentry,
45413 + const struct dentry * p_dentry,
45414 + const struct vfsmount * p_mnt, const int fmode,
45415 + const int imode)
45416 +{
45417 + __u32 reqmode = GR_WRITE | GR_CREATE;
45418 + __u32 mode;
45419 +
45420 + if (unlikely(fmode & O_APPEND))
45421 + reqmode |= GR_APPEND;
45422 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45423 + reqmode |= GR_READ;
45424 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45425 + reqmode |= GR_SETID;
45426 +
45427 + mode =
45428 + gr_check_create(dentry, p_dentry, p_mnt,
45429 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45430 +
45431 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45432 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45433 + reqmode & GR_READ ? " reading" : "",
45434 + reqmode & GR_WRITE ? " writing" : reqmode &
45435 + GR_APPEND ? " appending" : "");
45436 + return reqmode;
45437 + } else
45438 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45439 + {
45440 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45441 + reqmode & GR_READ ? " reading" : "",
45442 + reqmode & GR_WRITE ? " writing" : reqmode &
45443 + GR_APPEND ? " appending" : "");
45444 + return 0;
45445 + } else if (unlikely((mode & reqmode) != reqmode))
45446 + return 0;
45447 +
45448 + return reqmode;
45449 +}
45450 +
45451 +__u32
45452 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45453 + const int fmode)
45454 +{
45455 + __u32 mode, reqmode = GR_FIND;
45456 +
45457 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45458 + reqmode |= GR_EXEC;
45459 + if (fmode & S_IWOTH)
45460 + reqmode |= GR_WRITE;
45461 + if (fmode & S_IROTH)
45462 + reqmode |= GR_READ;
45463 +
45464 + mode =
45465 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45466 + mnt);
45467 +
45468 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45469 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45470 + reqmode & GR_READ ? " reading" : "",
45471 + reqmode & GR_WRITE ? " writing" : "",
45472 + reqmode & GR_EXEC ? " executing" : "");
45473 + return reqmode;
45474 + } else
45475 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45476 + {
45477 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45478 + reqmode & GR_READ ? " reading" : "",
45479 + reqmode & GR_WRITE ? " writing" : "",
45480 + reqmode & GR_EXEC ? " executing" : "");
45481 + return 0;
45482 + } else if (unlikely((mode & reqmode) != reqmode))
45483 + return 0;
45484 +
45485 + return reqmode;
45486 +}
45487 +
45488 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45489 +{
45490 + __u32 mode;
45491 +
45492 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45493 +
45494 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45495 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45496 + return mode;
45497 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45498 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45499 + return 0;
45500 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45501 + return 0;
45502 +
45503 + return (reqmode);
45504 +}
45505 +
45506 +__u32
45507 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45508 +{
45509 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45510 +}
45511 +
45512 +__u32
45513 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45514 +{
45515 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45516 +}
45517 +
45518 +__u32
45519 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45520 +{
45521 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45522 +}
45523 +
45524 +__u32
45525 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45526 +{
45527 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45528 +}
45529 +
45530 +__u32
45531 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45532 + mode_t mode)
45533 +{
45534 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45535 + return 1;
45536 +
45537 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45538 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45539 + GR_FCHMOD_ACL_MSG);
45540 + } else {
45541 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45542 + }
45543 +}
45544 +
45545 +__u32
45546 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45547 + mode_t mode)
45548 +{
45549 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45550 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45551 + GR_CHMOD_ACL_MSG);
45552 + } else {
45553 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45554 + }
45555 +}
45556 +
45557 +__u32
45558 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45559 +{
45560 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45561 +}
45562 +
45563 +__u32
45564 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45565 +{
45566 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45567 +}
45568 +
45569 +__u32
45570 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45571 +{
45572 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45573 +}
45574 +
45575 +__u32
45576 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45577 +{
45578 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45579 + GR_UNIXCONNECT_ACL_MSG);
45580 +}
45581 +
45582 +/* hardlinks require at minimum create permission,
45583 + any additional privilege required is based on the
45584 + privilege of the file being linked to
45585 +*/
45586 +__u32
45587 +gr_acl_handle_link(const struct dentry * new_dentry,
45588 + const struct dentry * parent_dentry,
45589 + const struct vfsmount * parent_mnt,
45590 + const struct dentry * old_dentry,
45591 + const struct vfsmount * old_mnt, const char *to)
45592 +{
45593 + __u32 mode;
45594 + __u32 needmode = GR_CREATE | GR_LINK;
45595 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45596 +
45597 + mode =
45598 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45599 + old_mnt);
45600 +
45601 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45602 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45603 + return mode;
45604 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45605 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45606 + return 0;
45607 + } else if (unlikely((mode & needmode) != needmode))
45608 + return 0;
45609 +
45610 + return 1;
45611 +}
45612 +
45613 +__u32
45614 +gr_acl_handle_symlink(const struct dentry * new_dentry,
45615 + const struct dentry * parent_dentry,
45616 + const struct vfsmount * parent_mnt, const char *from)
45617 +{
45618 + __u32 needmode = GR_WRITE | GR_CREATE;
45619 + __u32 mode;
45620 +
45621 + mode =
45622 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45623 + GR_CREATE | GR_AUDIT_CREATE |
45624 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45625 +
45626 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45627 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45628 + return mode;
45629 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45630 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45631 + return 0;
45632 + } else if (unlikely((mode & needmode) != needmode))
45633 + return 0;
45634 +
45635 + return (GR_WRITE | GR_CREATE);
45636 +}
45637 +
45638 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45639 +{
45640 + __u32 mode;
45641 +
45642 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45643 +
45644 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45645 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45646 + return mode;
45647 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45648 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45649 + return 0;
45650 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45651 + return 0;
45652 +
45653 + return (reqmode);
45654 +}
45655 +
45656 +__u32
45657 +gr_acl_handle_mknod(const struct dentry * new_dentry,
45658 + const struct dentry * parent_dentry,
45659 + const struct vfsmount * parent_mnt,
45660 + const int mode)
45661 +{
45662 + __u32 reqmode = GR_WRITE | GR_CREATE;
45663 + if (unlikely(mode & (S_ISUID | S_ISGID)))
45664 + reqmode |= GR_SETID;
45665 +
45666 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45667 + reqmode, GR_MKNOD_ACL_MSG);
45668 +}
45669 +
45670 +__u32
45671 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
45672 + const struct dentry *parent_dentry,
45673 + const struct vfsmount *parent_mnt)
45674 +{
45675 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45676 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45677 +}
45678 +
45679 +#define RENAME_CHECK_SUCCESS(old, new) \
45680 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45681 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45682 +
45683 +int
45684 +gr_acl_handle_rename(struct dentry *new_dentry,
45685 + struct dentry *parent_dentry,
45686 + const struct vfsmount *parent_mnt,
45687 + struct dentry *old_dentry,
45688 + struct inode *old_parent_inode,
45689 + struct vfsmount *old_mnt, const char *newname)
45690 +{
45691 + __u32 comp1, comp2;
45692 + int error = 0;
45693 +
45694 + if (unlikely(!gr_acl_is_enabled()))
45695 + return 0;
45696 +
45697 + if (!new_dentry->d_inode) {
45698 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
45699 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
45700 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
45701 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
45702 + GR_DELETE | GR_AUDIT_DELETE |
45703 + GR_AUDIT_READ | GR_AUDIT_WRITE |
45704 + GR_SUPPRESS, old_mnt);
45705 + } else {
45706 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
45707 + GR_CREATE | GR_DELETE |
45708 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
45709 + GR_AUDIT_READ | GR_AUDIT_WRITE |
45710 + GR_SUPPRESS, parent_mnt);
45711 + comp2 =
45712 + gr_search_file(old_dentry,
45713 + GR_READ | GR_WRITE | GR_AUDIT_READ |
45714 + GR_DELETE | GR_AUDIT_DELETE |
45715 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
45716 + }
45717 +
45718 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
45719 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
45720 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45721 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
45722 + && !(comp2 & GR_SUPPRESS)) {
45723 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45724 + error = -EACCES;
45725 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
45726 + error = -EACCES;
45727 +
45728 + return error;
45729 +}
45730 +
45731 +void
45732 +gr_acl_handle_exit(void)
45733 +{
45734 + u16 id;
45735 + char *rolename;
45736 + struct file *exec_file;
45737 +
45738 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
45739 + !(current->role->roletype & GR_ROLE_PERSIST))) {
45740 + id = current->acl_role_id;
45741 + rolename = current->role->rolename;
45742 + gr_set_acls(1);
45743 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
45744 + }
45745 +
45746 + write_lock(&grsec_exec_file_lock);
45747 + exec_file = current->exec_file;
45748 + current->exec_file = NULL;
45749 + write_unlock(&grsec_exec_file_lock);
45750 +
45751 + if (exec_file)
45752 + fput(exec_file);
45753 +}
45754 +
45755 +int
45756 +gr_acl_handle_procpidmem(const struct task_struct *task)
45757 +{
45758 + if (unlikely(!gr_acl_is_enabled()))
45759 + return 0;
45760 +
45761 + if (task != current && task->acl->mode & GR_PROTPROCFD)
45762 + return -EACCES;
45763 +
45764 + return 0;
45765 +}
45766 diff -urNp linux-3.0.3/grsecurity/gracl_ip.c linux-3.0.3/grsecurity/gracl_ip.c
45767 --- linux-3.0.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
45768 +++ linux-3.0.3/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
45769 @@ -0,0 +1,381 @@
45770 +#include <linux/kernel.h>
45771 +#include <asm/uaccess.h>
45772 +#include <asm/errno.h>
45773 +#include <net/sock.h>
45774 +#include <linux/file.h>
45775 +#include <linux/fs.h>
45776 +#include <linux/net.h>
45777 +#include <linux/in.h>
45778 +#include <linux/skbuff.h>
45779 +#include <linux/ip.h>
45780 +#include <linux/udp.h>
45781 +#include <linux/types.h>
45782 +#include <linux/sched.h>
45783 +#include <linux/netdevice.h>
45784 +#include <linux/inetdevice.h>
45785 +#include <linux/gracl.h>
45786 +#include <linux/grsecurity.h>
45787 +#include <linux/grinternal.h>
45788 +
45789 +#define GR_BIND 0x01
45790 +#define GR_CONNECT 0x02
45791 +#define GR_INVERT 0x04
45792 +#define GR_BINDOVERRIDE 0x08
45793 +#define GR_CONNECTOVERRIDE 0x10
45794 +#define GR_SOCK_FAMILY 0x20
45795 +
45796 +static const char * gr_protocols[IPPROTO_MAX] = {
45797 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
45798 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
45799 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
45800 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
45801 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
45802 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
45803 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
45804 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
45805 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
45806 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
45807 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
45808 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
45809 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
45810 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
45811 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
45812 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
45813 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
45814 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
45815 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
45816 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
45817 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
45818 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
45819 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
45820 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
45821 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
45822 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
45823 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
45824 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
45825 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
45826 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
45827 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
45828 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
45829 + };
45830 +
45831 +static const char * gr_socktypes[SOCK_MAX] = {
45832 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
45833 + "unknown:7", "unknown:8", "unknown:9", "packet"
45834 + };
45835 +
45836 +static const char * gr_sockfamilies[AF_MAX+1] = {
45837 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
45838 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
45839 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
45840 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
45841 + };
45842 +
45843 +const char *
45844 +gr_proto_to_name(unsigned char proto)
45845 +{
45846 + return gr_protocols[proto];
45847 +}
45848 +
45849 +const char *
45850 +gr_socktype_to_name(unsigned char type)
45851 +{
45852 + return gr_socktypes[type];
45853 +}
45854 +
45855 +const char *
45856 +gr_sockfamily_to_name(unsigned char family)
45857 +{
45858 + return gr_sockfamilies[family];
45859 +}
45860 +
45861 +int
45862 +gr_search_socket(const int domain, const int type, const int protocol)
45863 +{
45864 + struct acl_subject_label *curr;
45865 + const struct cred *cred = current_cred();
45866 +
45867 + if (unlikely(!gr_acl_is_enabled()))
45868 + goto exit;
45869 +
45870 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
45871 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
45872 + goto exit; // let the kernel handle it
45873 +
45874 + curr = current->acl;
45875 +
45876 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
45877 + /* the family is allowed, if this is PF_INET allow it only if
45878 + the extra sock type/protocol checks pass */
45879 + if (domain == PF_INET)
45880 + goto inet_check;
45881 + goto exit;
45882 + } else {
45883 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
45884 + __u32 fakeip = 0;
45885 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
45886 + current->role->roletype, cred->uid,
45887 + cred->gid, current->exec_file ?
45888 + gr_to_filename(current->exec_file->f_path.dentry,
45889 + current->exec_file->f_path.mnt) :
45890 + curr->filename, curr->filename,
45891 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
45892 + &current->signal->saved_ip);
45893 + goto exit;
45894 + }
45895 + goto exit_fail;
45896 + }
45897 +
45898 +inet_check:
45899 + /* the rest of this checking is for IPv4 only */
45900 + if (!curr->ips)
45901 + goto exit;
45902 +
45903 + if ((curr->ip_type & (1 << type)) &&
45904 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
45905 + goto exit;
45906 +
45907 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
45908 + /* we don't place acls on raw sockets , and sometimes
45909 + dgram/ip sockets are opened for ioctl and not
45910 + bind/connect, so we'll fake a bind learn log */
45911 + if (type == SOCK_RAW || type == SOCK_PACKET) {
45912 + __u32 fakeip = 0;
45913 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
45914 + current->role->roletype, cred->uid,
45915 + cred->gid, current->exec_file ?
45916 + gr_to_filename(current->exec_file->f_path.dentry,
45917 + current->exec_file->f_path.mnt) :
45918 + curr->filename, curr->filename,
45919 + &fakeip, 0, type,
45920 + protocol, GR_CONNECT, &current->signal->saved_ip);
45921 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
45922 + __u32 fakeip = 0;
45923 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
45924 + current->role->roletype, cred->uid,
45925 + cred->gid, current->exec_file ?
45926 + gr_to_filename(current->exec_file->f_path.dentry,
45927 + current->exec_file->f_path.mnt) :
45928 + curr->filename, curr->filename,
45929 + &fakeip, 0, type,
45930 + protocol, GR_BIND, &current->signal->saved_ip);
45931 + }
45932 + /* we'll log when they use connect or bind */
45933 + goto exit;
45934 + }
45935 +
45936 +exit_fail:
45937 + if (domain == PF_INET)
45938 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
45939 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
45940 + else
45941 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
45942 + gr_socktype_to_name(type), protocol);
45943 +
45944 + return 0;
45945 +exit:
45946 + return 1;
45947 +}
45948 +
45949 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
45950 +{
45951 + if ((ip->mode & mode) &&
45952 + (ip_port >= ip->low) &&
45953 + (ip_port <= ip->high) &&
45954 + ((ntohl(ip_addr) & our_netmask) ==
45955 + (ntohl(our_addr) & our_netmask))
45956 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
45957 + && (ip->type & (1 << type))) {
45958 + if (ip->mode & GR_INVERT)
45959 + return 2; // specifically denied
45960 + else
45961 + return 1; // allowed
45962 + }
45963 +
45964 + return 0; // not specifically allowed, may continue parsing
45965 +}
45966 +
45967 +static int
45968 +gr_search_connectbind(const int full_mode, struct sock *sk,
45969 + struct sockaddr_in *addr, const int type)
45970 +{
45971 + char iface[IFNAMSIZ] = {0};
45972 + struct acl_subject_label *curr;
45973 + struct acl_ip_label *ip;
45974 + struct inet_sock *isk;
45975 + struct net_device *dev;
45976 + struct in_device *idev;
45977 + unsigned long i;
45978 + int ret;
45979 + int mode = full_mode & (GR_BIND | GR_CONNECT);
45980 + __u32 ip_addr = 0;
45981 + __u32 our_addr;
45982 + __u32 our_netmask;
45983 + char *p;
45984 + __u16 ip_port = 0;
45985 + const struct cred *cred = current_cred();
45986 +
45987 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
45988 + return 0;
45989 +
45990 + curr = current->acl;
45991 + isk = inet_sk(sk);
45992 +
45993 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
45994 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
45995 + addr->sin_addr.s_addr = curr->inaddr_any_override;
45996 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
45997 + struct sockaddr_in saddr;
45998 + int err;
45999 +
46000 + saddr.sin_family = AF_INET;
46001 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46002 + saddr.sin_port = isk->inet_sport;
46003 +
46004 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46005 + if (err)
46006 + return err;
46007 +
46008 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46009 + if (err)
46010 + return err;
46011 + }
46012 +
46013 + if (!curr->ips)
46014 + return 0;
46015 +
46016 + ip_addr = addr->sin_addr.s_addr;
46017 + ip_port = ntohs(addr->sin_port);
46018 +
46019 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46020 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46021 + current->role->roletype, cred->uid,
46022 + cred->gid, current->exec_file ?
46023 + gr_to_filename(current->exec_file->f_path.dentry,
46024 + current->exec_file->f_path.mnt) :
46025 + curr->filename, curr->filename,
46026 + &ip_addr, ip_port, type,
46027 + sk->sk_protocol, mode, &current->signal->saved_ip);
46028 + return 0;
46029 + }
46030 +
46031 + for (i = 0; i < curr->ip_num; i++) {
46032 + ip = *(curr->ips + i);
46033 + if (ip->iface != NULL) {
46034 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46035 + p = strchr(iface, ':');
46036 + if (p != NULL)
46037 + *p = '\0';
46038 + dev = dev_get_by_name(sock_net(sk), iface);
46039 + if (dev == NULL)
46040 + continue;
46041 + idev = in_dev_get(dev);
46042 + if (idev == NULL) {
46043 + dev_put(dev);
46044 + continue;
46045 + }
46046 + rcu_read_lock();
46047 + for_ifa(idev) {
46048 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46049 + our_addr = ifa->ifa_address;
46050 + our_netmask = 0xffffffff;
46051 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46052 + if (ret == 1) {
46053 + rcu_read_unlock();
46054 + in_dev_put(idev);
46055 + dev_put(dev);
46056 + return 0;
46057 + } else if (ret == 2) {
46058 + rcu_read_unlock();
46059 + in_dev_put(idev);
46060 + dev_put(dev);
46061 + goto denied;
46062 + }
46063 + }
46064 + } endfor_ifa(idev);
46065 + rcu_read_unlock();
46066 + in_dev_put(idev);
46067 + dev_put(dev);
46068 + } else {
46069 + our_addr = ip->addr;
46070 + our_netmask = ip->netmask;
46071 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46072 + if (ret == 1)
46073 + return 0;
46074 + else if (ret == 2)
46075 + goto denied;
46076 + }
46077 + }
46078 +
46079 +denied:
46080 + if (mode == GR_BIND)
46081 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46082 + else if (mode == GR_CONNECT)
46083 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46084 +
46085 + return -EACCES;
46086 +}
46087 +
46088 +int
46089 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46090 +{
46091 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46092 +}
46093 +
46094 +int
46095 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46096 +{
46097 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46098 +}
46099 +
46100 +int gr_search_listen(struct socket *sock)
46101 +{
46102 + struct sock *sk = sock->sk;
46103 + struct sockaddr_in addr;
46104 +
46105 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46106 + addr.sin_port = inet_sk(sk)->inet_sport;
46107 +
46108 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46109 +}
46110 +
46111 +int gr_search_accept(struct socket *sock)
46112 +{
46113 + struct sock *sk = sock->sk;
46114 + struct sockaddr_in addr;
46115 +
46116 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46117 + addr.sin_port = inet_sk(sk)->inet_sport;
46118 +
46119 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46120 +}
46121 +
46122 +int
46123 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46124 +{
46125 + if (addr)
46126 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46127 + else {
46128 + struct sockaddr_in sin;
46129 + const struct inet_sock *inet = inet_sk(sk);
46130 +
46131 + sin.sin_addr.s_addr = inet->inet_daddr;
46132 + sin.sin_port = inet->inet_dport;
46133 +
46134 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46135 + }
46136 +}
46137 +
46138 +int
46139 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46140 +{
46141 + struct sockaddr_in sin;
46142 +
46143 + if (unlikely(skb->len < sizeof (struct udphdr)))
46144 + return 0; // skip this packet
46145 +
46146 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46147 + sin.sin_port = udp_hdr(skb)->source;
46148 +
46149 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46150 +}
46151 diff -urNp linux-3.0.3/grsecurity/gracl_learn.c linux-3.0.3/grsecurity/gracl_learn.c
46152 --- linux-3.0.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46153 +++ linux-3.0.3/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46154 @@ -0,0 +1,207 @@
46155 +#include <linux/kernel.h>
46156 +#include <linux/mm.h>
46157 +#include <linux/sched.h>
46158 +#include <linux/poll.h>
46159 +#include <linux/string.h>
46160 +#include <linux/file.h>
46161 +#include <linux/types.h>
46162 +#include <linux/vmalloc.h>
46163 +#include <linux/grinternal.h>
46164 +
46165 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46166 + size_t count, loff_t *ppos);
46167 +extern int gr_acl_is_enabled(void);
46168 +
46169 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46170 +static int gr_learn_attached;
46171 +
46172 +/* use a 512k buffer */
46173 +#define LEARN_BUFFER_SIZE (512 * 1024)
46174 +
46175 +static DEFINE_SPINLOCK(gr_learn_lock);
46176 +static DEFINE_MUTEX(gr_learn_user_mutex);
46177 +
46178 +/* we need to maintain two buffers, so that the kernel context of grlearn
46179 + uses a semaphore around the userspace copying, and the other kernel contexts
46180 + use a spinlock when copying into the buffer, since they cannot sleep
46181 +*/
46182 +static char *learn_buffer;
46183 +static char *learn_buffer_user;
46184 +static int learn_buffer_len;
46185 +static int learn_buffer_user_len;
46186 +
46187 +static ssize_t
46188 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46189 +{
46190 + DECLARE_WAITQUEUE(wait, current);
46191 + ssize_t retval = 0;
46192 +
46193 + add_wait_queue(&learn_wait, &wait);
46194 + set_current_state(TASK_INTERRUPTIBLE);
46195 + do {
46196 + mutex_lock(&gr_learn_user_mutex);
46197 + spin_lock(&gr_learn_lock);
46198 + if (learn_buffer_len)
46199 + break;
46200 + spin_unlock(&gr_learn_lock);
46201 + mutex_unlock(&gr_learn_user_mutex);
46202 + if (file->f_flags & O_NONBLOCK) {
46203 + retval = -EAGAIN;
46204 + goto out;
46205 + }
46206 + if (signal_pending(current)) {
46207 + retval = -ERESTARTSYS;
46208 + goto out;
46209 + }
46210 +
46211 + schedule();
46212 + } while (1);
46213 +
46214 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46215 + learn_buffer_user_len = learn_buffer_len;
46216 + retval = learn_buffer_len;
46217 + learn_buffer_len = 0;
46218 +
46219 + spin_unlock(&gr_learn_lock);
46220 +
46221 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46222 + retval = -EFAULT;
46223 +
46224 + mutex_unlock(&gr_learn_user_mutex);
46225 +out:
46226 + set_current_state(TASK_RUNNING);
46227 + remove_wait_queue(&learn_wait, &wait);
46228 + return retval;
46229 +}
46230 +
46231 +static unsigned int
46232 +poll_learn(struct file * file, poll_table * wait)
46233 +{
46234 + poll_wait(file, &learn_wait, wait);
46235 +
46236 + if (learn_buffer_len)
46237 + return (POLLIN | POLLRDNORM);
46238 +
46239 + return 0;
46240 +}
46241 +
46242 +void
46243 +gr_clear_learn_entries(void)
46244 +{
46245 + char *tmp;
46246 +
46247 + mutex_lock(&gr_learn_user_mutex);
46248 + spin_lock(&gr_learn_lock);
46249 + tmp = learn_buffer;
46250 + learn_buffer = NULL;
46251 + spin_unlock(&gr_learn_lock);
46252 + if (tmp)
46253 + vfree(tmp);
46254 + if (learn_buffer_user != NULL) {
46255 + vfree(learn_buffer_user);
46256 + learn_buffer_user = NULL;
46257 + }
46258 + learn_buffer_len = 0;
46259 + mutex_unlock(&gr_learn_user_mutex);
46260 +
46261 + return;
46262 +}
46263 +
46264 +void
46265 +gr_add_learn_entry(const char *fmt, ...)
46266 +{
46267 + va_list args;
46268 + unsigned int len;
46269 +
46270 + if (!gr_learn_attached)
46271 + return;
46272 +
46273 + spin_lock(&gr_learn_lock);
46274 +
46275 + /* leave a gap at the end so we know when it's "full" but don't have to
46276 + compute the exact length of the string we're trying to append
46277 + */
46278 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46279 + spin_unlock(&gr_learn_lock);
46280 + wake_up_interruptible(&learn_wait);
46281 + return;
46282 + }
46283 + if (learn_buffer == NULL) {
46284 + spin_unlock(&gr_learn_lock);
46285 + return;
46286 + }
46287 +
46288 + va_start(args, fmt);
46289 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46290 + va_end(args);
46291 +
46292 + learn_buffer_len += len + 1;
46293 +
46294 + spin_unlock(&gr_learn_lock);
46295 + wake_up_interruptible(&learn_wait);
46296 +
46297 + return;
46298 +}
46299 +
46300 +static int
46301 +open_learn(struct inode *inode, struct file *file)
46302 +{
46303 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46304 + return -EBUSY;
46305 + if (file->f_mode & FMODE_READ) {
46306 + int retval = 0;
46307 + mutex_lock(&gr_learn_user_mutex);
46308 + if (learn_buffer == NULL)
46309 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46310 + if (learn_buffer_user == NULL)
46311 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46312 + if (learn_buffer == NULL) {
46313 + retval = -ENOMEM;
46314 + goto out_error;
46315 + }
46316 + if (learn_buffer_user == NULL) {
46317 + retval = -ENOMEM;
46318 + goto out_error;
46319 + }
46320 + learn_buffer_len = 0;
46321 + learn_buffer_user_len = 0;
46322 + gr_learn_attached = 1;
46323 +out_error:
46324 + mutex_unlock(&gr_learn_user_mutex);
46325 + return retval;
46326 + }
46327 + return 0;
46328 +}
46329 +
46330 +static int
46331 +close_learn(struct inode *inode, struct file *file)
46332 +{
46333 + if (file->f_mode & FMODE_READ) {
46334 + char *tmp = NULL;
46335 + mutex_lock(&gr_learn_user_mutex);
46336 + spin_lock(&gr_learn_lock);
46337 + tmp = learn_buffer;
46338 + learn_buffer = NULL;
46339 + spin_unlock(&gr_learn_lock);
46340 + if (tmp)
46341 + vfree(tmp);
46342 + if (learn_buffer_user != NULL) {
46343 + vfree(learn_buffer_user);
46344 + learn_buffer_user = NULL;
46345 + }
46346 + learn_buffer_len = 0;
46347 + learn_buffer_user_len = 0;
46348 + gr_learn_attached = 0;
46349 + mutex_unlock(&gr_learn_user_mutex);
46350 + }
46351 +
46352 + return 0;
46353 +}
46354 +
46355 +const struct file_operations grsec_fops = {
46356 + .read = read_learn,
46357 + .write = write_grsec_handler,
46358 + .open = open_learn,
46359 + .release = close_learn,
46360 + .poll = poll_learn,
46361 +};
46362 diff -urNp linux-3.0.3/grsecurity/gracl_res.c linux-3.0.3/grsecurity/gracl_res.c
46363 --- linux-3.0.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46364 +++ linux-3.0.3/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46365 @@ -0,0 +1,68 @@
46366 +#include <linux/kernel.h>
46367 +#include <linux/sched.h>
46368 +#include <linux/gracl.h>
46369 +#include <linux/grinternal.h>
46370 +
46371 +static const char *restab_log[] = {
46372 + [RLIMIT_CPU] = "RLIMIT_CPU",
46373 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46374 + [RLIMIT_DATA] = "RLIMIT_DATA",
46375 + [RLIMIT_STACK] = "RLIMIT_STACK",
46376 + [RLIMIT_CORE] = "RLIMIT_CORE",
46377 + [RLIMIT_RSS] = "RLIMIT_RSS",
46378 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46379 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46380 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46381 + [RLIMIT_AS] = "RLIMIT_AS",
46382 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46383 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46384 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46385 + [RLIMIT_NICE] = "RLIMIT_NICE",
46386 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46387 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46388 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46389 +};
46390 +
46391 +void
46392 +gr_log_resource(const struct task_struct *task,
46393 + const int res, const unsigned long wanted, const int gt)
46394 +{
46395 + const struct cred *cred;
46396 + unsigned long rlim;
46397 +
46398 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46399 + return;
46400 +
46401 + // not yet supported resource
46402 + if (unlikely(!restab_log[res]))
46403 + return;
46404 +
46405 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46406 + rlim = task_rlimit_max(task, res);
46407 + else
46408 + rlim = task_rlimit(task, res);
46409 +
46410 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46411 + return;
46412 +
46413 + rcu_read_lock();
46414 + cred = __task_cred(task);
46415 +
46416 + if (res == RLIMIT_NPROC &&
46417 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46418 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46419 + goto out_rcu_unlock;
46420 + else if (res == RLIMIT_MEMLOCK &&
46421 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46422 + goto out_rcu_unlock;
46423 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46424 + goto out_rcu_unlock;
46425 + rcu_read_unlock();
46426 +
46427 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46428 +
46429 + return;
46430 +out_rcu_unlock:
46431 + rcu_read_unlock();
46432 + return;
46433 +}
46434 diff -urNp linux-3.0.3/grsecurity/gracl_segv.c linux-3.0.3/grsecurity/gracl_segv.c
46435 --- linux-3.0.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46436 +++ linux-3.0.3/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46437 @@ -0,0 +1,299 @@
46438 +#include <linux/kernel.h>
46439 +#include <linux/mm.h>
46440 +#include <asm/uaccess.h>
46441 +#include <asm/errno.h>
46442 +#include <asm/mman.h>
46443 +#include <net/sock.h>
46444 +#include <linux/file.h>
46445 +#include <linux/fs.h>
46446 +#include <linux/net.h>
46447 +#include <linux/in.h>
46448 +#include <linux/slab.h>
46449 +#include <linux/types.h>
46450 +#include <linux/sched.h>
46451 +#include <linux/timer.h>
46452 +#include <linux/gracl.h>
46453 +#include <linux/grsecurity.h>
46454 +#include <linux/grinternal.h>
46455 +
46456 +static struct crash_uid *uid_set;
46457 +static unsigned short uid_used;
46458 +static DEFINE_SPINLOCK(gr_uid_lock);
46459 +extern rwlock_t gr_inode_lock;
46460 +extern struct acl_subject_label *
46461 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46462 + struct acl_role_label *role);
46463 +
46464 +#ifdef CONFIG_BTRFS_FS
46465 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46466 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46467 +#endif
46468 +
46469 +static inline dev_t __get_dev(const struct dentry *dentry)
46470 +{
46471 +#ifdef CONFIG_BTRFS_FS
46472 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46473 + return get_btrfs_dev_from_inode(dentry->d_inode);
46474 + else
46475 +#endif
46476 + return dentry->d_inode->i_sb->s_dev;
46477 +}
46478 +
46479 +int
46480 +gr_init_uidset(void)
46481 +{
46482 + uid_set =
46483 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46484 + uid_used = 0;
46485 +
46486 + return uid_set ? 1 : 0;
46487 +}
46488 +
46489 +void
46490 +gr_free_uidset(void)
46491 +{
46492 + if (uid_set)
46493 + kfree(uid_set);
46494 +
46495 + return;
46496 +}
46497 +
46498 +int
46499 +gr_find_uid(const uid_t uid)
46500 +{
46501 + struct crash_uid *tmp = uid_set;
46502 + uid_t buid;
46503 + int low = 0, high = uid_used - 1, mid;
46504 +
46505 + while (high >= low) {
46506 + mid = (low + high) >> 1;
46507 + buid = tmp[mid].uid;
46508 + if (buid == uid)
46509 + return mid;
46510 + if (buid > uid)
46511 + high = mid - 1;
46512 + if (buid < uid)
46513 + low = mid + 1;
46514 + }
46515 +
46516 + return -1;
46517 +}
46518 +
46519 +static __inline__ void
46520 +gr_insertsort(void)
46521 +{
46522 + unsigned short i, j;
46523 + struct crash_uid index;
46524 +
46525 + for (i = 1; i < uid_used; i++) {
46526 + index = uid_set[i];
46527 + j = i;
46528 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46529 + uid_set[j] = uid_set[j - 1];
46530 + j--;
46531 + }
46532 + uid_set[j] = index;
46533 + }
46534 +
46535 + return;
46536 +}
46537 +
46538 +static __inline__ void
46539 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46540 +{
46541 + int loc;
46542 +
46543 + if (uid_used == GR_UIDTABLE_MAX)
46544 + return;
46545 +
46546 + loc = gr_find_uid(uid);
46547 +
46548 + if (loc >= 0) {
46549 + uid_set[loc].expires = expires;
46550 + return;
46551 + }
46552 +
46553 + uid_set[uid_used].uid = uid;
46554 + uid_set[uid_used].expires = expires;
46555 + uid_used++;
46556 +
46557 + gr_insertsort();
46558 +
46559 + return;
46560 +}
46561 +
46562 +void
46563 +gr_remove_uid(const unsigned short loc)
46564 +{
46565 + unsigned short i;
46566 +
46567 + for (i = loc + 1; i < uid_used; i++)
46568 + uid_set[i - 1] = uid_set[i];
46569 +
46570 + uid_used--;
46571 +
46572 + return;
46573 +}
46574 +
46575 +int
46576 +gr_check_crash_uid(const uid_t uid)
46577 +{
46578 + int loc;
46579 + int ret = 0;
46580 +
46581 + if (unlikely(!gr_acl_is_enabled()))
46582 + return 0;
46583 +
46584 + spin_lock(&gr_uid_lock);
46585 + loc = gr_find_uid(uid);
46586 +
46587 + if (loc < 0)
46588 + goto out_unlock;
46589 +
46590 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46591 + gr_remove_uid(loc);
46592 + else
46593 + ret = 1;
46594 +
46595 +out_unlock:
46596 + spin_unlock(&gr_uid_lock);
46597 + return ret;
46598 +}
46599 +
46600 +static __inline__ int
46601 +proc_is_setxid(const struct cred *cred)
46602 +{
46603 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
46604 + cred->uid != cred->fsuid)
46605 + return 1;
46606 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46607 + cred->gid != cred->fsgid)
46608 + return 1;
46609 +
46610 + return 0;
46611 +}
46612 +
46613 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
46614 +
46615 +void
46616 +gr_handle_crash(struct task_struct *task, const int sig)
46617 +{
46618 + struct acl_subject_label *curr;
46619 + struct acl_subject_label *curr2;
46620 + struct task_struct *tsk, *tsk2;
46621 + const struct cred *cred;
46622 + const struct cred *cred2;
46623 +
46624 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46625 + return;
46626 +
46627 + if (unlikely(!gr_acl_is_enabled()))
46628 + return;
46629 +
46630 + curr = task->acl;
46631 +
46632 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
46633 + return;
46634 +
46635 + if (time_before_eq(curr->expires, get_seconds())) {
46636 + curr->expires = 0;
46637 + curr->crashes = 0;
46638 + }
46639 +
46640 + curr->crashes++;
46641 +
46642 + if (!curr->expires)
46643 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46644 +
46645 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46646 + time_after(curr->expires, get_seconds())) {
46647 + rcu_read_lock();
46648 + cred = __task_cred(task);
46649 + if (cred->uid && proc_is_setxid(cred)) {
46650 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46651 + spin_lock(&gr_uid_lock);
46652 + gr_insert_uid(cred->uid, curr->expires);
46653 + spin_unlock(&gr_uid_lock);
46654 + curr->expires = 0;
46655 + curr->crashes = 0;
46656 + read_lock(&tasklist_lock);
46657 + do_each_thread(tsk2, tsk) {
46658 + cred2 = __task_cred(tsk);
46659 + if (tsk != task && cred2->uid == cred->uid)
46660 + gr_fake_force_sig(SIGKILL, tsk);
46661 + } while_each_thread(tsk2, tsk);
46662 + read_unlock(&tasklist_lock);
46663 + } else {
46664 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46665 + read_lock(&tasklist_lock);
46666 + do_each_thread(tsk2, tsk) {
46667 + if (likely(tsk != task)) {
46668 + curr2 = tsk->acl;
46669 +
46670 + if (curr2->device == curr->device &&
46671 + curr2->inode == curr->inode)
46672 + gr_fake_force_sig(SIGKILL, tsk);
46673 + }
46674 + } while_each_thread(tsk2, tsk);
46675 + read_unlock(&tasklist_lock);
46676 + }
46677 + rcu_read_unlock();
46678 + }
46679 +
46680 + return;
46681 +}
46682 +
46683 +int
46684 +gr_check_crash_exec(const struct file *filp)
46685 +{
46686 + struct acl_subject_label *curr;
46687 +
46688 + if (unlikely(!gr_acl_is_enabled()))
46689 + return 0;
46690 +
46691 + read_lock(&gr_inode_lock);
46692 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
46693 + __get_dev(filp->f_path.dentry),
46694 + current->role);
46695 + read_unlock(&gr_inode_lock);
46696 +
46697 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
46698 + (!curr->crashes && !curr->expires))
46699 + return 0;
46700 +
46701 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46702 + time_after(curr->expires, get_seconds()))
46703 + return 1;
46704 + else if (time_before_eq(curr->expires, get_seconds())) {
46705 + curr->crashes = 0;
46706 + curr->expires = 0;
46707 + }
46708 +
46709 + return 0;
46710 +}
46711 +
46712 +void
46713 +gr_handle_alertkill(struct task_struct *task)
46714 +{
46715 + struct acl_subject_label *curracl;
46716 + __u32 curr_ip;
46717 + struct task_struct *p, *p2;
46718 +
46719 + if (unlikely(!gr_acl_is_enabled()))
46720 + return;
46721 +
46722 + curracl = task->acl;
46723 + curr_ip = task->signal->curr_ip;
46724 +
46725 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
46726 + read_lock(&tasklist_lock);
46727 + do_each_thread(p2, p) {
46728 + if (p->signal->curr_ip == curr_ip)
46729 + gr_fake_force_sig(SIGKILL, p);
46730 + } while_each_thread(p2, p);
46731 + read_unlock(&tasklist_lock);
46732 + } else if (curracl->mode & GR_KILLPROC)
46733 + gr_fake_force_sig(SIGKILL, task);
46734 +
46735 + return;
46736 +}
46737 diff -urNp linux-3.0.3/grsecurity/gracl_shm.c linux-3.0.3/grsecurity/gracl_shm.c
46738 --- linux-3.0.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
46739 +++ linux-3.0.3/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
46740 @@ -0,0 +1,40 @@
46741 +#include <linux/kernel.h>
46742 +#include <linux/mm.h>
46743 +#include <linux/sched.h>
46744 +#include <linux/file.h>
46745 +#include <linux/ipc.h>
46746 +#include <linux/gracl.h>
46747 +#include <linux/grsecurity.h>
46748 +#include <linux/grinternal.h>
46749 +
46750 +int
46751 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
46752 + const time_t shm_createtime, const uid_t cuid, const int shmid)
46753 +{
46754 + struct task_struct *task;
46755 +
46756 + if (!gr_acl_is_enabled())
46757 + return 1;
46758 +
46759 + rcu_read_lock();
46760 + read_lock(&tasklist_lock);
46761 +
46762 + task = find_task_by_vpid(shm_cprid);
46763 +
46764 + if (unlikely(!task))
46765 + task = find_task_by_vpid(shm_lapid);
46766 +
46767 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
46768 + (task->pid == shm_lapid)) &&
46769 + (task->acl->mode & GR_PROTSHM) &&
46770 + (task->acl != current->acl))) {
46771 + read_unlock(&tasklist_lock);
46772 + rcu_read_unlock();
46773 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
46774 + return 0;
46775 + }
46776 + read_unlock(&tasklist_lock);
46777 + rcu_read_unlock();
46778 +
46779 + return 1;
46780 +}
46781 diff -urNp linux-3.0.3/grsecurity/grsec_chdir.c linux-3.0.3/grsecurity/grsec_chdir.c
46782 --- linux-3.0.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
46783 +++ linux-3.0.3/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
46784 @@ -0,0 +1,19 @@
46785 +#include <linux/kernel.h>
46786 +#include <linux/sched.h>
46787 +#include <linux/fs.h>
46788 +#include <linux/file.h>
46789 +#include <linux/grsecurity.h>
46790 +#include <linux/grinternal.h>
46791 +
46792 +void
46793 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
46794 +{
46795 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
46796 + if ((grsec_enable_chdir && grsec_enable_group &&
46797 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
46798 + !grsec_enable_group)) {
46799 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
46800 + }
46801 +#endif
46802 + return;
46803 +}
46804 diff -urNp linux-3.0.3/grsecurity/grsec_chroot.c linux-3.0.3/grsecurity/grsec_chroot.c
46805 --- linux-3.0.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
46806 +++ linux-3.0.3/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
46807 @@ -0,0 +1,349 @@
46808 +#include <linux/kernel.h>
46809 +#include <linux/module.h>
46810 +#include <linux/sched.h>
46811 +#include <linux/file.h>
46812 +#include <linux/fs.h>
46813 +#include <linux/mount.h>
46814 +#include <linux/types.h>
46815 +#include <linux/pid_namespace.h>
46816 +#include <linux/grsecurity.h>
46817 +#include <linux/grinternal.h>
46818 +
46819 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
46820 +{
46821 +#ifdef CONFIG_GRKERNSEC
46822 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
46823 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
46824 + task->gr_is_chrooted = 1;
46825 + else
46826 + task->gr_is_chrooted = 0;
46827 +
46828 + task->gr_chroot_dentry = path->dentry;
46829 +#endif
46830 + return;
46831 +}
46832 +
46833 +void gr_clear_chroot_entries(struct task_struct *task)
46834 +{
46835 +#ifdef CONFIG_GRKERNSEC
46836 + task->gr_is_chrooted = 0;
46837 + task->gr_chroot_dentry = NULL;
46838 +#endif
46839 + return;
46840 +}
46841 +
46842 +int
46843 +gr_handle_chroot_unix(const pid_t pid)
46844 +{
46845 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
46846 + struct task_struct *p;
46847 +
46848 + if (unlikely(!grsec_enable_chroot_unix))
46849 + return 1;
46850 +
46851 + if (likely(!proc_is_chrooted(current)))
46852 + return 1;
46853 +
46854 + rcu_read_lock();
46855 + read_lock(&tasklist_lock);
46856 + p = find_task_by_vpid_unrestricted(pid);
46857 + if (unlikely(p && !have_same_root(current, p))) {
46858 + read_unlock(&tasklist_lock);
46859 + rcu_read_unlock();
46860 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
46861 + return 0;
46862 + }
46863 + read_unlock(&tasklist_lock);
46864 + rcu_read_unlock();
46865 +#endif
46866 + return 1;
46867 +}
46868 +
46869 +int
46870 +gr_handle_chroot_nice(void)
46871 +{
46872 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
46873 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
46874 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
46875 + return -EPERM;
46876 + }
46877 +#endif
46878 + return 0;
46879 +}
46880 +
46881 +int
46882 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
46883 +{
46884 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
46885 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
46886 + && proc_is_chrooted(current)) {
46887 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
46888 + return -EACCES;
46889 + }
46890 +#endif
46891 + return 0;
46892 +}
46893 +
46894 +int
46895 +gr_handle_chroot_rawio(const struct inode *inode)
46896 +{
46897 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46898 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46899 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
46900 + return 1;
46901 +#endif
46902 + return 0;
46903 +}
46904 +
46905 +int
46906 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
46907 +{
46908 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
46909 + struct task_struct *p;
46910 + int ret = 0;
46911 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
46912 + return ret;
46913 +
46914 + read_lock(&tasklist_lock);
46915 + do_each_pid_task(pid, type, p) {
46916 + if (!have_same_root(current, p)) {
46917 + ret = 1;
46918 + goto out;
46919 + }
46920 + } while_each_pid_task(pid, type, p);
46921 +out:
46922 + read_unlock(&tasklist_lock);
46923 + return ret;
46924 +#endif
46925 + return 0;
46926 +}
46927 +
46928 +int
46929 +gr_pid_is_chrooted(struct task_struct *p)
46930 +{
46931 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
46932 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
46933 + return 0;
46934 +
46935 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
46936 + !have_same_root(current, p)) {
46937 + return 1;
46938 + }
46939 +#endif
46940 + return 0;
46941 +}
46942 +
46943 +EXPORT_SYMBOL(gr_pid_is_chrooted);
46944 +
46945 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
46946 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
46947 +{
46948 + struct path path, currentroot;
46949 + int ret = 0;
46950 +
46951 + path.dentry = (struct dentry *)u_dentry;
46952 + path.mnt = (struct vfsmount *)u_mnt;
46953 + get_fs_root(current->fs, &currentroot);
46954 + if (path_is_under(&path, &currentroot))
46955 + ret = 1;
46956 + path_put(&currentroot);
46957 +
46958 + return ret;
46959 +}
46960 +#endif
46961 +
46962 +int
46963 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
46964 +{
46965 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
46966 + if (!grsec_enable_chroot_fchdir)
46967 + return 1;
46968 +
46969 + if (!proc_is_chrooted(current))
46970 + return 1;
46971 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
46972 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
46973 + return 0;
46974 + }
46975 +#endif
46976 + return 1;
46977 +}
46978 +
46979 +int
46980 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
46981 + const time_t shm_createtime)
46982 +{
46983 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
46984 + struct task_struct *p;
46985 + time_t starttime;
46986 +
46987 + if (unlikely(!grsec_enable_chroot_shmat))
46988 + return 1;
46989 +
46990 + if (likely(!proc_is_chrooted(current)))
46991 + return 1;
46992 +
46993 + rcu_read_lock();
46994 + read_lock(&tasklist_lock);
46995 +
46996 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
46997 + starttime = p->start_time.tv_sec;
46998 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
46999 + if (have_same_root(current, p)) {
47000 + goto allow;
47001 + } else {
47002 + read_unlock(&tasklist_lock);
47003 + rcu_read_unlock();
47004 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47005 + return 0;
47006 + }
47007 + }
47008 + /* creator exited, pid reuse, fall through to next check */
47009 + }
47010 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47011 + if (unlikely(!have_same_root(current, p))) {
47012 + read_unlock(&tasklist_lock);
47013 + rcu_read_unlock();
47014 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47015 + return 0;
47016 + }
47017 + }
47018 +
47019 +allow:
47020 + read_unlock(&tasklist_lock);
47021 + rcu_read_unlock();
47022 +#endif
47023 + return 1;
47024 +}
47025 +
47026 +void
47027 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47028 +{
47029 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47030 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47031 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47032 +#endif
47033 + return;
47034 +}
47035 +
47036 +int
47037 +gr_handle_chroot_mknod(const struct dentry *dentry,
47038 + const struct vfsmount *mnt, const int mode)
47039 +{
47040 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47041 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47042 + proc_is_chrooted(current)) {
47043 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47044 + return -EPERM;
47045 + }
47046 +#endif
47047 + return 0;
47048 +}
47049 +
47050 +int
47051 +gr_handle_chroot_mount(const struct dentry *dentry,
47052 + const struct vfsmount *mnt, const char *dev_name)
47053 +{
47054 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47055 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47056 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47057 + return -EPERM;
47058 + }
47059 +#endif
47060 + return 0;
47061 +}
47062 +
47063 +int
47064 +gr_handle_chroot_pivot(void)
47065 +{
47066 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47067 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47068 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47069 + return -EPERM;
47070 + }
47071 +#endif
47072 + return 0;
47073 +}
47074 +
47075 +int
47076 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47077 +{
47078 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47079 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47080 + !gr_is_outside_chroot(dentry, mnt)) {
47081 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47082 + return -EPERM;
47083 + }
47084 +#endif
47085 + return 0;
47086 +}
47087 +
47088 +int
47089 +gr_handle_chroot_caps(struct path *path)
47090 +{
47091 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47092 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47093 + (init_task.fs->root.dentry != path->dentry) &&
47094 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47095 +
47096 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47097 + const struct cred *old = current_cred();
47098 + struct cred *new = prepare_creds();
47099 + if (new == NULL)
47100 + return 1;
47101 +
47102 + new->cap_permitted = cap_drop(old->cap_permitted,
47103 + chroot_caps);
47104 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47105 + chroot_caps);
47106 + new->cap_effective = cap_drop(old->cap_effective,
47107 + chroot_caps);
47108 +
47109 + commit_creds(new);
47110 +
47111 + return 0;
47112 + }
47113 +#endif
47114 + return 0;
47115 +}
47116 +
47117 +int
47118 +gr_handle_chroot_sysctl(const int op)
47119 +{
47120 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47121 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47122 + proc_is_chrooted(current))
47123 + return -EACCES;
47124 +#endif
47125 + return 0;
47126 +}
47127 +
47128 +void
47129 +gr_handle_chroot_chdir(struct path *path)
47130 +{
47131 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47132 + if (grsec_enable_chroot_chdir)
47133 + set_fs_pwd(current->fs, path);
47134 +#endif
47135 + return;
47136 +}
47137 +
47138 +int
47139 +gr_handle_chroot_chmod(const struct dentry *dentry,
47140 + const struct vfsmount *mnt, const int mode)
47141 +{
47142 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47143 + /* allow chmod +s on directories, but not files */
47144 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47145 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47146 + proc_is_chrooted(current)) {
47147 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47148 + return -EPERM;
47149 + }
47150 +#endif
47151 + return 0;
47152 +}
47153 +
47154 +#ifdef CONFIG_SECURITY
47155 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47156 +#endif
47157 diff -urNp linux-3.0.3/grsecurity/grsec_disabled.c linux-3.0.3/grsecurity/grsec_disabled.c
47158 --- linux-3.0.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47159 +++ linux-3.0.3/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47160 @@ -0,0 +1,447 @@
47161 +#include <linux/kernel.h>
47162 +#include <linux/module.h>
47163 +#include <linux/sched.h>
47164 +#include <linux/file.h>
47165 +#include <linux/fs.h>
47166 +#include <linux/kdev_t.h>
47167 +#include <linux/net.h>
47168 +#include <linux/in.h>
47169 +#include <linux/ip.h>
47170 +#include <linux/skbuff.h>
47171 +#include <linux/sysctl.h>
47172 +
47173 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47174 +void
47175 +pax_set_initial_flags(struct linux_binprm *bprm)
47176 +{
47177 + return;
47178 +}
47179 +#endif
47180 +
47181 +#ifdef CONFIG_SYSCTL
47182 +__u32
47183 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47184 +{
47185 + return 0;
47186 +}
47187 +#endif
47188 +
47189 +#ifdef CONFIG_TASKSTATS
47190 +int gr_is_taskstats_denied(int pid)
47191 +{
47192 + return 0;
47193 +}
47194 +#endif
47195 +
47196 +int
47197 +gr_acl_is_enabled(void)
47198 +{
47199 + return 0;
47200 +}
47201 +
47202 +int
47203 +gr_handle_rawio(const struct inode *inode)
47204 +{
47205 + return 0;
47206 +}
47207 +
47208 +void
47209 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47210 +{
47211 + return;
47212 +}
47213 +
47214 +int
47215 +gr_handle_ptrace(struct task_struct *task, const long request)
47216 +{
47217 + return 0;
47218 +}
47219 +
47220 +int
47221 +gr_handle_proc_ptrace(struct task_struct *task)
47222 +{
47223 + return 0;
47224 +}
47225 +
47226 +void
47227 +gr_learn_resource(const struct task_struct *task,
47228 + const int res, const unsigned long wanted, const int gt)
47229 +{
47230 + return;
47231 +}
47232 +
47233 +int
47234 +gr_set_acls(const int type)
47235 +{
47236 + return 0;
47237 +}
47238 +
47239 +int
47240 +gr_check_hidden_task(const struct task_struct *tsk)
47241 +{
47242 + return 0;
47243 +}
47244 +
47245 +int
47246 +gr_check_protected_task(const struct task_struct *task)
47247 +{
47248 + return 0;
47249 +}
47250 +
47251 +int
47252 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47253 +{
47254 + return 0;
47255 +}
47256 +
47257 +void
47258 +gr_copy_label(struct task_struct *tsk)
47259 +{
47260 + return;
47261 +}
47262 +
47263 +void
47264 +gr_set_pax_flags(struct task_struct *task)
47265 +{
47266 + return;
47267 +}
47268 +
47269 +int
47270 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47271 + const int unsafe_share)
47272 +{
47273 + return 0;
47274 +}
47275 +
47276 +void
47277 +gr_handle_delete(const ino_t ino, const dev_t dev)
47278 +{
47279 + return;
47280 +}
47281 +
47282 +void
47283 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47284 +{
47285 + return;
47286 +}
47287 +
47288 +void
47289 +gr_handle_crash(struct task_struct *task, const int sig)
47290 +{
47291 + return;
47292 +}
47293 +
47294 +int
47295 +gr_check_crash_exec(const struct file *filp)
47296 +{
47297 + return 0;
47298 +}
47299 +
47300 +int
47301 +gr_check_crash_uid(const uid_t uid)
47302 +{
47303 + return 0;
47304 +}
47305 +
47306 +void
47307 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47308 + struct dentry *old_dentry,
47309 + struct dentry *new_dentry,
47310 + struct vfsmount *mnt, const __u8 replace)
47311 +{
47312 + return;
47313 +}
47314 +
47315 +int
47316 +gr_search_socket(const int family, const int type, const int protocol)
47317 +{
47318 + return 1;
47319 +}
47320 +
47321 +int
47322 +gr_search_connectbind(const int mode, const struct socket *sock,
47323 + const struct sockaddr_in *addr)
47324 +{
47325 + return 0;
47326 +}
47327 +
47328 +int
47329 +gr_is_capable(const int cap)
47330 +{
47331 + return 1;
47332 +}
47333 +
47334 +int
47335 +gr_is_capable_nolog(const int cap)
47336 +{
47337 + return 1;
47338 +}
47339 +
47340 +void
47341 +gr_handle_alertkill(struct task_struct *task)
47342 +{
47343 + return;
47344 +}
47345 +
47346 +__u32
47347 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47348 +{
47349 + return 1;
47350 +}
47351 +
47352 +__u32
47353 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47354 + const struct vfsmount * mnt)
47355 +{
47356 + return 1;
47357 +}
47358 +
47359 +__u32
47360 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47361 + const int fmode)
47362 +{
47363 + return 1;
47364 +}
47365 +
47366 +__u32
47367 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47368 +{
47369 + return 1;
47370 +}
47371 +
47372 +__u32
47373 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47374 +{
47375 + return 1;
47376 +}
47377 +
47378 +int
47379 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47380 + unsigned int *vm_flags)
47381 +{
47382 + return 1;
47383 +}
47384 +
47385 +__u32
47386 +gr_acl_handle_truncate(const struct dentry * dentry,
47387 + const struct vfsmount * mnt)
47388 +{
47389 + return 1;
47390 +}
47391 +
47392 +__u32
47393 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47394 +{
47395 + return 1;
47396 +}
47397 +
47398 +__u32
47399 +gr_acl_handle_access(const struct dentry * dentry,
47400 + const struct vfsmount * mnt, const int fmode)
47401 +{
47402 + return 1;
47403 +}
47404 +
47405 +__u32
47406 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47407 + mode_t mode)
47408 +{
47409 + return 1;
47410 +}
47411 +
47412 +__u32
47413 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47414 + mode_t mode)
47415 +{
47416 + return 1;
47417 +}
47418 +
47419 +__u32
47420 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47421 +{
47422 + return 1;
47423 +}
47424 +
47425 +__u32
47426 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47427 +{
47428 + return 1;
47429 +}
47430 +
47431 +void
47432 +grsecurity_init(void)
47433 +{
47434 + return;
47435 +}
47436 +
47437 +__u32
47438 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47439 + const struct dentry * parent_dentry,
47440 + const struct vfsmount * parent_mnt,
47441 + const int mode)
47442 +{
47443 + return 1;
47444 +}
47445 +
47446 +__u32
47447 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47448 + const struct dentry * parent_dentry,
47449 + const struct vfsmount * parent_mnt)
47450 +{
47451 + return 1;
47452 +}
47453 +
47454 +__u32
47455 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47456 + const struct dentry * parent_dentry,
47457 + const struct vfsmount * parent_mnt, const char *from)
47458 +{
47459 + return 1;
47460 +}
47461 +
47462 +__u32
47463 +gr_acl_handle_link(const struct dentry * new_dentry,
47464 + const struct dentry * parent_dentry,
47465 + const struct vfsmount * parent_mnt,
47466 + const struct dentry * old_dentry,
47467 + const struct vfsmount * old_mnt, const char *to)
47468 +{
47469 + return 1;
47470 +}
47471 +
47472 +int
47473 +gr_acl_handle_rename(const struct dentry *new_dentry,
47474 + const struct dentry *parent_dentry,
47475 + const struct vfsmount *parent_mnt,
47476 + const struct dentry *old_dentry,
47477 + const struct inode *old_parent_inode,
47478 + const struct vfsmount *old_mnt, const char *newname)
47479 +{
47480 + return 0;
47481 +}
47482 +
47483 +int
47484 +gr_acl_handle_filldir(const struct file *file, const char *name,
47485 + const int namelen, const ino_t ino)
47486 +{
47487 + return 1;
47488 +}
47489 +
47490 +int
47491 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47492 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47493 +{
47494 + return 1;
47495 +}
47496 +
47497 +int
47498 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47499 +{
47500 + return 0;
47501 +}
47502 +
47503 +int
47504 +gr_search_accept(const struct socket *sock)
47505 +{
47506 + return 0;
47507 +}
47508 +
47509 +int
47510 +gr_search_listen(const struct socket *sock)
47511 +{
47512 + return 0;
47513 +}
47514 +
47515 +int
47516 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47517 +{
47518 + return 0;
47519 +}
47520 +
47521 +__u32
47522 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47523 +{
47524 + return 1;
47525 +}
47526 +
47527 +__u32
47528 +gr_acl_handle_creat(const struct dentry * dentry,
47529 + const struct dentry * p_dentry,
47530 + const struct vfsmount * p_mnt, const int fmode,
47531 + const int imode)
47532 +{
47533 + return 1;
47534 +}
47535 +
47536 +void
47537 +gr_acl_handle_exit(void)
47538 +{
47539 + return;
47540 +}
47541 +
47542 +int
47543 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47544 +{
47545 + return 1;
47546 +}
47547 +
47548 +void
47549 +gr_set_role_label(const uid_t uid, const gid_t gid)
47550 +{
47551 + return;
47552 +}
47553 +
47554 +int
47555 +gr_acl_handle_procpidmem(const struct task_struct *task)
47556 +{
47557 + return 0;
47558 +}
47559 +
47560 +int
47561 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47562 +{
47563 + return 0;
47564 +}
47565 +
47566 +int
47567 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47568 +{
47569 + return 0;
47570 +}
47571 +
47572 +void
47573 +gr_set_kernel_label(struct task_struct *task)
47574 +{
47575 + return;
47576 +}
47577 +
47578 +int
47579 +gr_check_user_change(int real, int effective, int fs)
47580 +{
47581 + return 0;
47582 +}
47583 +
47584 +int
47585 +gr_check_group_change(int real, int effective, int fs)
47586 +{
47587 + return 0;
47588 +}
47589 +
47590 +int gr_acl_enable_at_secure(void)
47591 +{
47592 + return 0;
47593 +}
47594 +
47595 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47596 +{
47597 + return dentry->d_inode->i_sb->s_dev;
47598 +}
47599 +
47600 +EXPORT_SYMBOL(gr_is_capable);
47601 +EXPORT_SYMBOL(gr_is_capable_nolog);
47602 +EXPORT_SYMBOL(gr_learn_resource);
47603 +EXPORT_SYMBOL(gr_set_kernel_label);
47604 +#ifdef CONFIG_SECURITY
47605 +EXPORT_SYMBOL(gr_check_user_change);
47606 +EXPORT_SYMBOL(gr_check_group_change);
47607 +#endif
47608 diff -urNp linux-3.0.3/grsecurity/grsec_exec.c linux-3.0.3/grsecurity/grsec_exec.c
47609 --- linux-3.0.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47610 +++ linux-3.0.3/grsecurity/grsec_exec.c 2011-08-23 21:48:14.000000000 -0400
47611 @@ -0,0 +1,87 @@
47612 +#include <linux/kernel.h>
47613 +#include <linux/sched.h>
47614 +#include <linux/file.h>
47615 +#include <linux/binfmts.h>
47616 +#include <linux/fs.h>
47617 +#include <linux/types.h>
47618 +#include <linux/grdefs.h>
47619 +#include <linux/grsecurity.h>
47620 +#include <linux/grinternal.h>
47621 +#include <linux/capability.h>
47622 +
47623 +#include <asm/uaccess.h>
47624 +
47625 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47626 +static char gr_exec_arg_buf[132];
47627 +static DEFINE_MUTEX(gr_exec_arg_mutex);
47628 +#endif
47629 +
47630 +int
47631 +gr_handle_nproc(void)
47632 +{
47633 +#ifdef CONFIG_GRKERNSEC_EXECVE
47634 + const struct cred *cred = current_cred();
47635 + if (grsec_enable_execve && cred->user &&
47636 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
47637 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
47638 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
47639 + return -EAGAIN;
47640 + }
47641 +#endif
47642 + return 0;
47643 +}
47644 +
47645 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47646 +
47647 +void
47648 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47649 +{
47650 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47651 + char *grarg = gr_exec_arg_buf;
47652 + unsigned int i, x, execlen = 0;
47653 + char c;
47654 +
47655 + if (!((grsec_enable_execlog && grsec_enable_group &&
47656 + in_group_p(grsec_audit_gid))
47657 + || (grsec_enable_execlog && !grsec_enable_group)))
47658 + return;
47659 +
47660 + mutex_lock(&gr_exec_arg_mutex);
47661 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
47662 +
47663 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
47664 + const char __user *p;
47665 + unsigned int len;
47666 +
47667 + p = get_user_arg_ptr(argv, i);
47668 + if (IS_ERR(p))
47669 + goto log;
47670 +
47671 + len = strnlen_user(p, 128 - execlen);
47672 + if (len > 128 - execlen)
47673 + len = 128 - execlen;
47674 + else if (len > 0)
47675 + len--;
47676 + if (copy_from_user(grarg + execlen, p, len))
47677 + goto log;
47678 +
47679 + /* rewrite unprintable characters */
47680 + for (x = 0; x < len; x++) {
47681 + c = *(grarg + execlen + x);
47682 + if (c < 32 || c > 126)
47683 + *(grarg + execlen + x) = ' ';
47684 + }
47685 +
47686 + execlen += len;
47687 + *(grarg + execlen) = ' ';
47688 + *(grarg + execlen + 1) = '\0';
47689 + execlen++;
47690 + }
47691 +
47692 + log:
47693 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47694 + bprm->file->f_path.mnt, grarg);
47695 + mutex_unlock(&gr_exec_arg_mutex);
47696 +#endif
47697 + return;
47698 +}
47699 diff -urNp linux-3.0.3/grsecurity/grsec_fifo.c linux-3.0.3/grsecurity/grsec_fifo.c
47700 --- linux-3.0.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
47701 +++ linux-3.0.3/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
47702 @@ -0,0 +1,24 @@
47703 +#include <linux/kernel.h>
47704 +#include <linux/sched.h>
47705 +#include <linux/fs.h>
47706 +#include <linux/file.h>
47707 +#include <linux/grinternal.h>
47708 +
47709 +int
47710 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
47711 + const struct dentry *dir, const int flag, const int acc_mode)
47712 +{
47713 +#ifdef CONFIG_GRKERNSEC_FIFO
47714 + const struct cred *cred = current_cred();
47715 +
47716 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
47717 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
47718 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
47719 + (cred->fsuid != dentry->d_inode->i_uid)) {
47720 + if (!inode_permission(dentry->d_inode, acc_mode))
47721 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
47722 + return -EACCES;
47723 + }
47724 +#endif
47725 + return 0;
47726 +}
47727 diff -urNp linux-3.0.3/grsecurity/grsec_fork.c linux-3.0.3/grsecurity/grsec_fork.c
47728 --- linux-3.0.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
47729 +++ linux-3.0.3/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
47730 @@ -0,0 +1,23 @@
47731 +#include <linux/kernel.h>
47732 +#include <linux/sched.h>
47733 +#include <linux/grsecurity.h>
47734 +#include <linux/grinternal.h>
47735 +#include <linux/errno.h>
47736 +
47737 +void
47738 +gr_log_forkfail(const int retval)
47739 +{
47740 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
47741 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
47742 + switch (retval) {
47743 + case -EAGAIN:
47744 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
47745 + break;
47746 + case -ENOMEM:
47747 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
47748 + break;
47749 + }
47750 + }
47751 +#endif
47752 + return;
47753 +}
47754 diff -urNp linux-3.0.3/grsecurity/grsec_init.c linux-3.0.3/grsecurity/grsec_init.c
47755 --- linux-3.0.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
47756 +++ linux-3.0.3/grsecurity/grsec_init.c 2011-08-23 21:48:14.000000000 -0400
47757 @@ -0,0 +1,273 @@
47758 +#include <linux/kernel.h>
47759 +#include <linux/sched.h>
47760 +#include <linux/mm.h>
47761 +#include <linux/gracl.h>
47762 +#include <linux/slab.h>
47763 +#include <linux/vmalloc.h>
47764 +#include <linux/percpu.h>
47765 +#include <linux/module.h>
47766 +
47767 +int grsec_enable_brute;
47768 +int grsec_enable_link;
47769 +int grsec_enable_dmesg;
47770 +int grsec_enable_harden_ptrace;
47771 +int grsec_enable_fifo;
47772 +int grsec_enable_execve;
47773 +int grsec_enable_execlog;
47774 +int grsec_enable_signal;
47775 +int grsec_enable_forkfail;
47776 +int grsec_enable_audit_ptrace;
47777 +int grsec_enable_time;
47778 +int grsec_enable_audit_textrel;
47779 +int grsec_enable_group;
47780 +int grsec_audit_gid;
47781 +int grsec_enable_chdir;
47782 +int grsec_enable_mount;
47783 +int grsec_enable_rofs;
47784 +int grsec_enable_chroot_findtask;
47785 +int grsec_enable_chroot_mount;
47786 +int grsec_enable_chroot_shmat;
47787 +int grsec_enable_chroot_fchdir;
47788 +int grsec_enable_chroot_double;
47789 +int grsec_enable_chroot_pivot;
47790 +int grsec_enable_chroot_chdir;
47791 +int grsec_enable_chroot_chmod;
47792 +int grsec_enable_chroot_mknod;
47793 +int grsec_enable_chroot_nice;
47794 +int grsec_enable_chroot_execlog;
47795 +int grsec_enable_chroot_caps;
47796 +int grsec_enable_chroot_sysctl;
47797 +int grsec_enable_chroot_unix;
47798 +int grsec_enable_tpe;
47799 +int grsec_tpe_gid;
47800 +int grsec_enable_blackhole;
47801 +#ifdef CONFIG_IPV6_MODULE
47802 +EXPORT_SYMBOL(grsec_enable_blackhole);
47803 +#endif
47804 +int grsec_lastack_retries;
47805 +int grsec_enable_tpe_all;
47806 +int grsec_enable_tpe_invert;
47807 +int grsec_enable_socket_all;
47808 +int grsec_socket_all_gid;
47809 +int grsec_enable_socket_client;
47810 +int grsec_socket_client_gid;
47811 +int grsec_enable_socket_server;
47812 +int grsec_socket_server_gid;
47813 +int grsec_resource_logging;
47814 +int grsec_disable_privio;
47815 +int grsec_enable_log_rwxmaps;
47816 +int grsec_lock;
47817 +
47818 +DEFINE_SPINLOCK(grsec_alert_lock);
47819 +unsigned long grsec_alert_wtime = 0;
47820 +unsigned long grsec_alert_fyet = 0;
47821 +
47822 +DEFINE_SPINLOCK(grsec_audit_lock);
47823 +
47824 +DEFINE_RWLOCK(grsec_exec_file_lock);
47825 +
47826 +char *gr_shared_page[4];
47827 +
47828 +char *gr_alert_log_fmt;
47829 +char *gr_audit_log_fmt;
47830 +char *gr_alert_log_buf;
47831 +char *gr_audit_log_buf;
47832 +
47833 +extern struct gr_arg *gr_usermode;
47834 +extern unsigned char *gr_system_salt;
47835 +extern unsigned char *gr_system_sum;
47836 +
47837 +void __init
47838 +grsecurity_init(void)
47839 +{
47840 + int j;
47841 + /* create the per-cpu shared pages */
47842 +
47843 +#ifdef CONFIG_X86
47844 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
47845 +#endif
47846 +
47847 + for (j = 0; j < 4; j++) {
47848 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
47849 + if (gr_shared_page[j] == NULL) {
47850 + panic("Unable to allocate grsecurity shared page");
47851 + return;
47852 + }
47853 + }
47854 +
47855 + /* allocate log buffers */
47856 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
47857 + if (!gr_alert_log_fmt) {
47858 + panic("Unable to allocate grsecurity alert log format buffer");
47859 + return;
47860 + }
47861 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
47862 + if (!gr_audit_log_fmt) {
47863 + panic("Unable to allocate grsecurity audit log format buffer");
47864 + return;
47865 + }
47866 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
47867 + if (!gr_alert_log_buf) {
47868 + panic("Unable to allocate grsecurity alert log buffer");
47869 + return;
47870 + }
47871 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
47872 + if (!gr_audit_log_buf) {
47873 + panic("Unable to allocate grsecurity audit log buffer");
47874 + return;
47875 + }
47876 +
47877 + /* allocate memory for authentication structure */
47878 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
47879 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
47880 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
47881 +
47882 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
47883 + panic("Unable to allocate grsecurity authentication structure");
47884 + return;
47885 + }
47886 +
47887 +
47888 +#ifdef CONFIG_GRKERNSEC_IO
47889 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
47890 + grsec_disable_privio = 1;
47891 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
47892 + grsec_disable_privio = 1;
47893 +#else
47894 + grsec_disable_privio = 0;
47895 +#endif
47896 +#endif
47897 +
47898 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
47899 + /* for backward compatibility, tpe_invert always defaults to on if
47900 + enabled in the kernel
47901 + */
47902 + grsec_enable_tpe_invert = 1;
47903 +#endif
47904 +
47905 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
47906 +#ifndef CONFIG_GRKERNSEC_SYSCTL
47907 + grsec_lock = 1;
47908 +#endif
47909 +
47910 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
47911 + grsec_enable_audit_textrel = 1;
47912 +#endif
47913 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
47914 + grsec_enable_log_rwxmaps = 1;
47915 +#endif
47916 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
47917 + grsec_enable_group = 1;
47918 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
47919 +#endif
47920 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47921 + grsec_enable_chdir = 1;
47922 +#endif
47923 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47924 + grsec_enable_harden_ptrace = 1;
47925 +#endif
47926 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
47927 + grsec_enable_mount = 1;
47928 +#endif
47929 +#ifdef CONFIG_GRKERNSEC_LINK
47930 + grsec_enable_link = 1;
47931 +#endif
47932 +#ifdef CONFIG_GRKERNSEC_BRUTE
47933 + grsec_enable_brute = 1;
47934 +#endif
47935 +#ifdef CONFIG_GRKERNSEC_DMESG
47936 + grsec_enable_dmesg = 1;
47937 +#endif
47938 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
47939 + grsec_enable_blackhole = 1;
47940 + grsec_lastack_retries = 4;
47941 +#endif
47942 +#ifdef CONFIG_GRKERNSEC_FIFO
47943 + grsec_enable_fifo = 1;
47944 +#endif
47945 +#ifdef CONFIG_GRKERNSEC_EXECVE
47946 + grsec_enable_execve = 1;
47947 +#endif
47948 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47949 + grsec_enable_execlog = 1;
47950 +#endif
47951 +#ifdef CONFIG_GRKERNSEC_SIGNAL
47952 + grsec_enable_signal = 1;
47953 +#endif
47954 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
47955 + grsec_enable_forkfail = 1;
47956 +#endif
47957 +#ifdef CONFIG_GRKERNSEC_TIME
47958 + grsec_enable_time = 1;
47959 +#endif
47960 +#ifdef CONFIG_GRKERNSEC_RESLOG
47961 + grsec_resource_logging = 1;
47962 +#endif
47963 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47964 + grsec_enable_chroot_findtask = 1;
47965 +#endif
47966 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47967 + grsec_enable_chroot_unix = 1;
47968 +#endif
47969 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47970 + grsec_enable_chroot_mount = 1;
47971 +#endif
47972 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47973 + grsec_enable_chroot_fchdir = 1;
47974 +#endif
47975 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47976 + grsec_enable_chroot_shmat = 1;
47977 +#endif
47978 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
47979 + grsec_enable_audit_ptrace = 1;
47980 +#endif
47981 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47982 + grsec_enable_chroot_double = 1;
47983 +#endif
47984 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47985 + grsec_enable_chroot_pivot = 1;
47986 +#endif
47987 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47988 + grsec_enable_chroot_chdir = 1;
47989 +#endif
47990 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47991 + grsec_enable_chroot_chmod = 1;
47992 +#endif
47993 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47994 + grsec_enable_chroot_mknod = 1;
47995 +#endif
47996 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47997 + grsec_enable_chroot_nice = 1;
47998 +#endif
47999 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48000 + grsec_enable_chroot_execlog = 1;
48001 +#endif
48002 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48003 + grsec_enable_chroot_caps = 1;
48004 +#endif
48005 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48006 + grsec_enable_chroot_sysctl = 1;
48007 +#endif
48008 +#ifdef CONFIG_GRKERNSEC_TPE
48009 + grsec_enable_tpe = 1;
48010 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48011 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48012 + grsec_enable_tpe_all = 1;
48013 +#endif
48014 +#endif
48015 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48016 + grsec_enable_socket_all = 1;
48017 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48018 +#endif
48019 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48020 + grsec_enable_socket_client = 1;
48021 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48022 +#endif
48023 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48024 + grsec_enable_socket_server = 1;
48025 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48026 +#endif
48027 +#endif
48028 +
48029 + return;
48030 +}
48031 diff -urNp linux-3.0.3/grsecurity/grsec_link.c linux-3.0.3/grsecurity/grsec_link.c
48032 --- linux-3.0.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48033 +++ linux-3.0.3/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48034 @@ -0,0 +1,43 @@
48035 +#include <linux/kernel.h>
48036 +#include <linux/sched.h>
48037 +#include <linux/fs.h>
48038 +#include <linux/file.h>
48039 +#include <linux/grinternal.h>
48040 +
48041 +int
48042 +gr_handle_follow_link(const struct inode *parent,
48043 + const struct inode *inode,
48044 + const struct dentry *dentry, const struct vfsmount *mnt)
48045 +{
48046 +#ifdef CONFIG_GRKERNSEC_LINK
48047 + const struct cred *cred = current_cred();
48048 +
48049 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48050 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48051 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48052 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48053 + return -EACCES;
48054 + }
48055 +#endif
48056 + return 0;
48057 +}
48058 +
48059 +int
48060 +gr_handle_hardlink(const struct dentry *dentry,
48061 + const struct vfsmount *mnt,
48062 + struct inode *inode, const int mode, const char *to)
48063 +{
48064 +#ifdef CONFIG_GRKERNSEC_LINK
48065 + const struct cred *cred = current_cred();
48066 +
48067 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48068 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48069 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48070 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48071 + !capable(CAP_FOWNER) && cred->uid) {
48072 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48073 + return -EPERM;
48074 + }
48075 +#endif
48076 + return 0;
48077 +}
48078 diff -urNp linux-3.0.3/grsecurity/grsec_log.c linux-3.0.3/grsecurity/grsec_log.c
48079 --- linux-3.0.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48080 +++ linux-3.0.3/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48081 @@ -0,0 +1,310 @@
48082 +#include <linux/kernel.h>
48083 +#include <linux/sched.h>
48084 +#include <linux/file.h>
48085 +#include <linux/tty.h>
48086 +#include <linux/fs.h>
48087 +#include <linux/grinternal.h>
48088 +
48089 +#ifdef CONFIG_TREE_PREEMPT_RCU
48090 +#define DISABLE_PREEMPT() preempt_disable()
48091 +#define ENABLE_PREEMPT() preempt_enable()
48092 +#else
48093 +#define DISABLE_PREEMPT()
48094 +#define ENABLE_PREEMPT()
48095 +#endif
48096 +
48097 +#define BEGIN_LOCKS(x) \
48098 + DISABLE_PREEMPT(); \
48099 + rcu_read_lock(); \
48100 + read_lock(&tasklist_lock); \
48101 + read_lock(&grsec_exec_file_lock); \
48102 + if (x != GR_DO_AUDIT) \
48103 + spin_lock(&grsec_alert_lock); \
48104 + else \
48105 + spin_lock(&grsec_audit_lock)
48106 +
48107 +#define END_LOCKS(x) \
48108 + if (x != GR_DO_AUDIT) \
48109 + spin_unlock(&grsec_alert_lock); \
48110 + else \
48111 + spin_unlock(&grsec_audit_lock); \
48112 + read_unlock(&grsec_exec_file_lock); \
48113 + read_unlock(&tasklist_lock); \
48114 + rcu_read_unlock(); \
48115 + ENABLE_PREEMPT(); \
48116 + if (x == GR_DONT_AUDIT) \
48117 + gr_handle_alertkill(current)
48118 +
48119 +enum {
48120 + FLOODING,
48121 + NO_FLOODING
48122 +};
48123 +
48124 +extern char *gr_alert_log_fmt;
48125 +extern char *gr_audit_log_fmt;
48126 +extern char *gr_alert_log_buf;
48127 +extern char *gr_audit_log_buf;
48128 +
48129 +static int gr_log_start(int audit)
48130 +{
48131 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48132 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48133 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48134 +
48135 + if (audit == GR_DO_AUDIT)
48136 + goto set_fmt;
48137 +
48138 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48139 + grsec_alert_wtime = jiffies;
48140 + grsec_alert_fyet = 0;
48141 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48142 + grsec_alert_fyet++;
48143 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48144 + grsec_alert_wtime = jiffies;
48145 + grsec_alert_fyet++;
48146 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48147 + return FLOODING;
48148 + } else return FLOODING;
48149 +
48150 +set_fmt:
48151 + memset(buf, 0, PAGE_SIZE);
48152 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48153 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48154 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48155 + } else if (current->signal->curr_ip) {
48156 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48157 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48158 + } else if (gr_acl_is_enabled()) {
48159 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48160 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48161 + } else {
48162 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48163 + strcpy(buf, fmt);
48164 + }
48165 +
48166 + return NO_FLOODING;
48167 +}
48168 +
48169 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48170 + __attribute__ ((format (printf, 2, 0)));
48171 +
48172 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48173 +{
48174 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48175 + unsigned int len = strlen(buf);
48176 +
48177 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48178 +
48179 + return;
48180 +}
48181 +
48182 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48183 + __attribute__ ((format (printf, 2, 3)));
48184 +
48185 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48186 +{
48187 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48188 + unsigned int len = strlen(buf);
48189 + va_list ap;
48190 +
48191 + va_start(ap, msg);
48192 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48193 + va_end(ap);
48194 +
48195 + return;
48196 +}
48197 +
48198 +static void gr_log_end(int audit)
48199 +{
48200 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48201 + unsigned int len = strlen(buf);
48202 +
48203 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48204 + printk("%s\n", buf);
48205 +
48206 + return;
48207 +}
48208 +
48209 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48210 +{
48211 + int logtype;
48212 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48213 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48214 + void *voidptr = NULL;
48215 + int num1 = 0, num2 = 0;
48216 + unsigned long ulong1 = 0, ulong2 = 0;
48217 + struct dentry *dentry = NULL;
48218 + struct vfsmount *mnt = NULL;
48219 + struct file *file = NULL;
48220 + struct task_struct *task = NULL;
48221 + const struct cred *cred, *pcred;
48222 + va_list ap;
48223 +
48224 + BEGIN_LOCKS(audit);
48225 + logtype = gr_log_start(audit);
48226 + if (logtype == FLOODING) {
48227 + END_LOCKS(audit);
48228 + return;
48229 + }
48230 + va_start(ap, argtypes);
48231 + switch (argtypes) {
48232 + case GR_TTYSNIFF:
48233 + task = va_arg(ap, struct task_struct *);
48234 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48235 + break;
48236 + case GR_SYSCTL_HIDDEN:
48237 + str1 = va_arg(ap, char *);
48238 + gr_log_middle_varargs(audit, msg, result, str1);
48239 + break;
48240 + case GR_RBAC:
48241 + dentry = va_arg(ap, struct dentry *);
48242 + mnt = va_arg(ap, struct vfsmount *);
48243 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48244 + break;
48245 + case GR_RBAC_STR:
48246 + dentry = va_arg(ap, struct dentry *);
48247 + mnt = va_arg(ap, struct vfsmount *);
48248 + str1 = va_arg(ap, char *);
48249 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48250 + break;
48251 + case GR_STR_RBAC:
48252 + str1 = va_arg(ap, char *);
48253 + dentry = va_arg(ap, struct dentry *);
48254 + mnt = va_arg(ap, struct vfsmount *);
48255 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48256 + break;
48257 + case GR_RBAC_MODE2:
48258 + dentry = va_arg(ap, struct dentry *);
48259 + mnt = va_arg(ap, struct vfsmount *);
48260 + str1 = va_arg(ap, char *);
48261 + str2 = va_arg(ap, char *);
48262 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48263 + break;
48264 + case GR_RBAC_MODE3:
48265 + dentry = va_arg(ap, struct dentry *);
48266 + mnt = va_arg(ap, struct vfsmount *);
48267 + str1 = va_arg(ap, char *);
48268 + str2 = va_arg(ap, char *);
48269 + str3 = va_arg(ap, char *);
48270 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48271 + break;
48272 + case GR_FILENAME:
48273 + dentry = va_arg(ap, struct dentry *);
48274 + mnt = va_arg(ap, struct vfsmount *);
48275 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48276 + break;
48277 + case GR_STR_FILENAME:
48278 + str1 = va_arg(ap, char *);
48279 + dentry = va_arg(ap, struct dentry *);
48280 + mnt = va_arg(ap, struct vfsmount *);
48281 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48282 + break;
48283 + case GR_FILENAME_STR:
48284 + dentry = va_arg(ap, struct dentry *);
48285 + mnt = va_arg(ap, struct vfsmount *);
48286 + str1 = va_arg(ap, char *);
48287 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48288 + break;
48289 + case GR_FILENAME_TWO_INT:
48290 + dentry = va_arg(ap, struct dentry *);
48291 + mnt = va_arg(ap, struct vfsmount *);
48292 + num1 = va_arg(ap, int);
48293 + num2 = va_arg(ap, int);
48294 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48295 + break;
48296 + case GR_FILENAME_TWO_INT_STR:
48297 + dentry = va_arg(ap, struct dentry *);
48298 + mnt = va_arg(ap, struct vfsmount *);
48299 + num1 = va_arg(ap, int);
48300 + num2 = va_arg(ap, int);
48301 + str1 = va_arg(ap, char *);
48302 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48303 + break;
48304 + case GR_TEXTREL:
48305 + file = va_arg(ap, struct file *);
48306 + ulong1 = va_arg(ap, unsigned long);
48307 + ulong2 = va_arg(ap, unsigned long);
48308 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48309 + break;
48310 + case GR_PTRACE:
48311 + task = va_arg(ap, struct task_struct *);
48312 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48313 + break;
48314 + case GR_RESOURCE:
48315 + task = va_arg(ap, struct task_struct *);
48316 + cred = __task_cred(task);
48317 + pcred = __task_cred(task->real_parent);
48318 + ulong1 = va_arg(ap, unsigned long);
48319 + str1 = va_arg(ap, char *);
48320 + ulong2 = va_arg(ap, unsigned long);
48321 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48322 + break;
48323 + case GR_CAP:
48324 + task = va_arg(ap, struct task_struct *);
48325 + cred = __task_cred(task);
48326 + pcred = __task_cred(task->real_parent);
48327 + str1 = va_arg(ap, char *);
48328 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48329 + break;
48330 + case GR_SIG:
48331 + str1 = va_arg(ap, char *);
48332 + voidptr = va_arg(ap, void *);
48333 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48334 + break;
48335 + case GR_SIG2:
48336 + task = va_arg(ap, struct task_struct *);
48337 + cred = __task_cred(task);
48338 + pcred = __task_cred(task->real_parent);
48339 + num1 = va_arg(ap, int);
48340 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48341 + break;
48342 + case GR_CRASH1:
48343 + task = va_arg(ap, struct task_struct *);
48344 + cred = __task_cred(task);
48345 + pcred = __task_cred(task->real_parent);
48346 + ulong1 = va_arg(ap, unsigned long);
48347 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48348 + break;
48349 + case GR_CRASH2:
48350 + task = va_arg(ap, struct task_struct *);
48351 + cred = __task_cred(task);
48352 + pcred = __task_cred(task->real_parent);
48353 + ulong1 = va_arg(ap, unsigned long);
48354 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48355 + break;
48356 + case GR_RWXMAP:
48357 + file = va_arg(ap, struct file *);
48358 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48359 + break;
48360 + case GR_PSACCT:
48361 + {
48362 + unsigned int wday, cday;
48363 + __u8 whr, chr;
48364 + __u8 wmin, cmin;
48365 + __u8 wsec, csec;
48366 + char cur_tty[64] = { 0 };
48367 + char parent_tty[64] = { 0 };
48368 +
48369 + task = va_arg(ap, struct task_struct *);
48370 + wday = va_arg(ap, unsigned int);
48371 + cday = va_arg(ap, unsigned int);
48372 + whr = va_arg(ap, int);
48373 + chr = va_arg(ap, int);
48374 + wmin = va_arg(ap, int);
48375 + cmin = va_arg(ap, int);
48376 + wsec = va_arg(ap, int);
48377 + csec = va_arg(ap, int);
48378 + ulong1 = va_arg(ap, unsigned long);
48379 + cred = __task_cred(task);
48380 + pcred = __task_cred(task->real_parent);
48381 +
48382 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48383 + }
48384 + break;
48385 + default:
48386 + gr_log_middle(audit, msg, ap);
48387 + }
48388 + va_end(ap);
48389 + gr_log_end(audit);
48390 + END_LOCKS(audit);
48391 +}
48392 diff -urNp linux-3.0.3/grsecurity/grsec_mem.c linux-3.0.3/grsecurity/grsec_mem.c
48393 --- linux-3.0.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48394 +++ linux-3.0.3/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48395 @@ -0,0 +1,33 @@
48396 +#include <linux/kernel.h>
48397 +#include <linux/sched.h>
48398 +#include <linux/mm.h>
48399 +#include <linux/mman.h>
48400 +#include <linux/grinternal.h>
48401 +
48402 +void
48403 +gr_handle_ioperm(void)
48404 +{
48405 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48406 + return;
48407 +}
48408 +
48409 +void
48410 +gr_handle_iopl(void)
48411 +{
48412 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48413 + return;
48414 +}
48415 +
48416 +void
48417 +gr_handle_mem_readwrite(u64 from, u64 to)
48418 +{
48419 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48420 + return;
48421 +}
48422 +
48423 +void
48424 +gr_handle_vm86(void)
48425 +{
48426 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48427 + return;
48428 +}
48429 diff -urNp linux-3.0.3/grsecurity/grsec_mount.c linux-3.0.3/grsecurity/grsec_mount.c
48430 --- linux-3.0.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48431 +++ linux-3.0.3/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48432 @@ -0,0 +1,62 @@
48433 +#include <linux/kernel.h>
48434 +#include <linux/sched.h>
48435 +#include <linux/mount.h>
48436 +#include <linux/grsecurity.h>
48437 +#include <linux/grinternal.h>
48438 +
48439 +void
48440 +gr_log_remount(const char *devname, const int retval)
48441 +{
48442 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48443 + if (grsec_enable_mount && (retval >= 0))
48444 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48445 +#endif
48446 + return;
48447 +}
48448 +
48449 +void
48450 +gr_log_unmount(const char *devname, const int retval)
48451 +{
48452 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48453 + if (grsec_enable_mount && (retval >= 0))
48454 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48455 +#endif
48456 + return;
48457 +}
48458 +
48459 +void
48460 +gr_log_mount(const char *from, const char *to, const int retval)
48461 +{
48462 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48463 + if (grsec_enable_mount && (retval >= 0))
48464 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48465 +#endif
48466 + return;
48467 +}
48468 +
48469 +int
48470 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48471 +{
48472 +#ifdef CONFIG_GRKERNSEC_ROFS
48473 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48474 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48475 + return -EPERM;
48476 + } else
48477 + return 0;
48478 +#endif
48479 + return 0;
48480 +}
48481 +
48482 +int
48483 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48484 +{
48485 +#ifdef CONFIG_GRKERNSEC_ROFS
48486 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48487 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48488 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48489 + return -EPERM;
48490 + } else
48491 + return 0;
48492 +#endif
48493 + return 0;
48494 +}
48495 diff -urNp linux-3.0.3/grsecurity/grsec_pax.c linux-3.0.3/grsecurity/grsec_pax.c
48496 --- linux-3.0.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48497 +++ linux-3.0.3/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48498 @@ -0,0 +1,36 @@
48499 +#include <linux/kernel.h>
48500 +#include <linux/sched.h>
48501 +#include <linux/mm.h>
48502 +#include <linux/file.h>
48503 +#include <linux/grinternal.h>
48504 +#include <linux/grsecurity.h>
48505 +
48506 +void
48507 +gr_log_textrel(struct vm_area_struct * vma)
48508 +{
48509 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48510 + if (grsec_enable_audit_textrel)
48511 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48512 +#endif
48513 + return;
48514 +}
48515 +
48516 +void
48517 +gr_log_rwxmmap(struct file *file)
48518 +{
48519 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48520 + if (grsec_enable_log_rwxmaps)
48521 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48522 +#endif
48523 + return;
48524 +}
48525 +
48526 +void
48527 +gr_log_rwxmprotect(struct file *file)
48528 +{
48529 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48530 + if (grsec_enable_log_rwxmaps)
48531 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48532 +#endif
48533 + return;
48534 +}
48535 diff -urNp linux-3.0.3/grsecurity/grsec_ptrace.c linux-3.0.3/grsecurity/grsec_ptrace.c
48536 --- linux-3.0.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48537 +++ linux-3.0.3/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48538 @@ -0,0 +1,14 @@
48539 +#include <linux/kernel.h>
48540 +#include <linux/sched.h>
48541 +#include <linux/grinternal.h>
48542 +#include <linux/grsecurity.h>
48543 +
48544 +void
48545 +gr_audit_ptrace(struct task_struct *task)
48546 +{
48547 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48548 + if (grsec_enable_audit_ptrace)
48549 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48550 +#endif
48551 + return;
48552 +}
48553 diff -urNp linux-3.0.3/grsecurity/grsec_sig.c linux-3.0.3/grsecurity/grsec_sig.c
48554 --- linux-3.0.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48555 +++ linux-3.0.3/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48556 @@ -0,0 +1,206 @@
48557 +#include <linux/kernel.h>
48558 +#include <linux/sched.h>
48559 +#include <linux/delay.h>
48560 +#include <linux/grsecurity.h>
48561 +#include <linux/grinternal.h>
48562 +#include <linux/hardirq.h>
48563 +
48564 +char *signames[] = {
48565 + [SIGSEGV] = "Segmentation fault",
48566 + [SIGILL] = "Illegal instruction",
48567 + [SIGABRT] = "Abort",
48568 + [SIGBUS] = "Invalid alignment/Bus error"
48569 +};
48570 +
48571 +void
48572 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48573 +{
48574 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48575 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48576 + (sig == SIGABRT) || (sig == SIGBUS))) {
48577 + if (t->pid == current->pid) {
48578 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48579 + } else {
48580 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48581 + }
48582 + }
48583 +#endif
48584 + return;
48585 +}
48586 +
48587 +int
48588 +gr_handle_signal(const struct task_struct *p, const int sig)
48589 +{
48590 +#ifdef CONFIG_GRKERNSEC
48591 + if (current->pid > 1 && gr_check_protected_task(p)) {
48592 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48593 + return -EPERM;
48594 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48595 + return -EPERM;
48596 + }
48597 +#endif
48598 + return 0;
48599 +}
48600 +
48601 +#ifdef CONFIG_GRKERNSEC
48602 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48603 +
48604 +int gr_fake_force_sig(int sig, struct task_struct *t)
48605 +{
48606 + unsigned long int flags;
48607 + int ret, blocked, ignored;
48608 + struct k_sigaction *action;
48609 +
48610 + spin_lock_irqsave(&t->sighand->siglock, flags);
48611 + action = &t->sighand->action[sig-1];
48612 + ignored = action->sa.sa_handler == SIG_IGN;
48613 + blocked = sigismember(&t->blocked, sig);
48614 + if (blocked || ignored) {
48615 + action->sa.sa_handler = SIG_DFL;
48616 + if (blocked) {
48617 + sigdelset(&t->blocked, sig);
48618 + recalc_sigpending_and_wake(t);
48619 + }
48620 + }
48621 + if (action->sa.sa_handler == SIG_DFL)
48622 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
48623 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48624 +
48625 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
48626 +
48627 + return ret;
48628 +}
48629 +#endif
48630 +
48631 +#ifdef CONFIG_GRKERNSEC_BRUTE
48632 +#define GR_USER_BAN_TIME (15 * 60)
48633 +
48634 +static int __get_dumpable(unsigned long mm_flags)
48635 +{
48636 + int ret;
48637 +
48638 + ret = mm_flags & MMF_DUMPABLE_MASK;
48639 + return (ret >= 2) ? 2 : ret;
48640 +}
48641 +#endif
48642 +
48643 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48644 +{
48645 +#ifdef CONFIG_GRKERNSEC_BRUTE
48646 + uid_t uid = 0;
48647 +
48648 + if (!grsec_enable_brute)
48649 + return;
48650 +
48651 + rcu_read_lock();
48652 + read_lock(&tasklist_lock);
48653 + read_lock(&grsec_exec_file_lock);
48654 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48655 + p->real_parent->brute = 1;
48656 + else {
48657 + const struct cred *cred = __task_cred(p), *cred2;
48658 + struct task_struct *tsk, *tsk2;
48659 +
48660 + if (!__get_dumpable(mm_flags) && cred->uid) {
48661 + struct user_struct *user;
48662 +
48663 + uid = cred->uid;
48664 +
48665 + /* this is put upon execution past expiration */
48666 + user = find_user(uid);
48667 + if (user == NULL)
48668 + goto unlock;
48669 + user->banned = 1;
48670 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48671 + if (user->ban_expires == ~0UL)
48672 + user->ban_expires--;
48673 +
48674 + do_each_thread(tsk2, tsk) {
48675 + cred2 = __task_cred(tsk);
48676 + if (tsk != p && cred2->uid == uid)
48677 + gr_fake_force_sig(SIGKILL, tsk);
48678 + } while_each_thread(tsk2, tsk);
48679 + }
48680 + }
48681 +unlock:
48682 + read_unlock(&grsec_exec_file_lock);
48683 + read_unlock(&tasklist_lock);
48684 + rcu_read_unlock();
48685 +
48686 + if (uid)
48687 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48688 +
48689 +#endif
48690 + return;
48691 +}
48692 +
48693 +void gr_handle_brute_check(void)
48694 +{
48695 +#ifdef CONFIG_GRKERNSEC_BRUTE
48696 + if (current->brute)
48697 + msleep(30 * 1000);
48698 +#endif
48699 + return;
48700 +}
48701 +
48702 +void gr_handle_kernel_exploit(void)
48703 +{
48704 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
48705 + const struct cred *cred;
48706 + struct task_struct *tsk, *tsk2;
48707 + struct user_struct *user;
48708 + uid_t uid;
48709 +
48710 + if (in_irq() || in_serving_softirq() || in_nmi())
48711 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
48712 +
48713 + uid = current_uid();
48714 +
48715 + if (uid == 0)
48716 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
48717 + else {
48718 + /* kill all the processes of this user, hold a reference
48719 + to their creds struct, and prevent them from creating
48720 + another process until system reset
48721 + */
48722 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
48723 + /* we intentionally leak this ref */
48724 + user = get_uid(current->cred->user);
48725 + if (user) {
48726 + user->banned = 1;
48727 + user->ban_expires = ~0UL;
48728 + }
48729 +
48730 + read_lock(&tasklist_lock);
48731 + do_each_thread(tsk2, tsk) {
48732 + cred = __task_cred(tsk);
48733 + if (cred->uid == uid)
48734 + gr_fake_force_sig(SIGKILL, tsk);
48735 + } while_each_thread(tsk2, tsk);
48736 + read_unlock(&tasklist_lock);
48737 + }
48738 +#endif
48739 +}
48740 +
48741 +int __gr_process_user_ban(struct user_struct *user)
48742 +{
48743 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48744 + if (unlikely(user->banned)) {
48745 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
48746 + user->banned = 0;
48747 + user->ban_expires = 0;
48748 + free_uid(user);
48749 + } else
48750 + return -EPERM;
48751 + }
48752 +#endif
48753 + return 0;
48754 +}
48755 +
48756 +int gr_process_user_ban(void)
48757 +{
48758 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48759 + return __gr_process_user_ban(current->cred->user);
48760 +#endif
48761 + return 0;
48762 +}
48763 diff -urNp linux-3.0.3/grsecurity/grsec_sock.c linux-3.0.3/grsecurity/grsec_sock.c
48764 --- linux-3.0.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
48765 +++ linux-3.0.3/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
48766 @@ -0,0 +1,244 @@
48767 +#include <linux/kernel.h>
48768 +#include <linux/module.h>
48769 +#include <linux/sched.h>
48770 +#include <linux/file.h>
48771 +#include <linux/net.h>
48772 +#include <linux/in.h>
48773 +#include <linux/ip.h>
48774 +#include <net/sock.h>
48775 +#include <net/inet_sock.h>
48776 +#include <linux/grsecurity.h>
48777 +#include <linux/grinternal.h>
48778 +#include <linux/gracl.h>
48779 +
48780 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
48781 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
48782 +
48783 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
48784 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
48785 +
48786 +#ifdef CONFIG_UNIX_MODULE
48787 +EXPORT_SYMBOL(gr_acl_handle_unix);
48788 +EXPORT_SYMBOL(gr_acl_handle_mknod);
48789 +EXPORT_SYMBOL(gr_handle_chroot_unix);
48790 +EXPORT_SYMBOL(gr_handle_create);
48791 +#endif
48792 +
48793 +#ifdef CONFIG_GRKERNSEC
48794 +#define gr_conn_table_size 32749
48795 +struct conn_table_entry {
48796 + struct conn_table_entry *next;
48797 + struct signal_struct *sig;
48798 +};
48799 +
48800 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
48801 +DEFINE_SPINLOCK(gr_conn_table_lock);
48802 +
48803 +extern const char * gr_socktype_to_name(unsigned char type);
48804 +extern const char * gr_proto_to_name(unsigned char proto);
48805 +extern const char * gr_sockfamily_to_name(unsigned char family);
48806 +
48807 +static __inline__ int
48808 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
48809 +{
48810 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
48811 +}
48812 +
48813 +static __inline__ int
48814 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
48815 + __u16 sport, __u16 dport)
48816 +{
48817 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
48818 + sig->gr_sport == sport && sig->gr_dport == dport))
48819 + return 1;
48820 + else
48821 + return 0;
48822 +}
48823 +
48824 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
48825 +{
48826 + struct conn_table_entry **match;
48827 + unsigned int index;
48828 +
48829 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
48830 + sig->gr_sport, sig->gr_dport,
48831 + gr_conn_table_size);
48832 +
48833 + newent->sig = sig;
48834 +
48835 + match = &gr_conn_table[index];
48836 + newent->next = *match;
48837 + *match = newent;
48838 +
48839 + return;
48840 +}
48841 +
48842 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
48843 +{
48844 + struct conn_table_entry *match, *last = NULL;
48845 + unsigned int index;
48846 +
48847 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
48848 + sig->gr_sport, sig->gr_dport,
48849 + gr_conn_table_size);
48850 +
48851 + match = gr_conn_table[index];
48852 + while (match && !conn_match(match->sig,
48853 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
48854 + sig->gr_dport)) {
48855 + last = match;
48856 + match = match->next;
48857 + }
48858 +
48859 + if (match) {
48860 + if (last)
48861 + last->next = match->next;
48862 + else
48863 + gr_conn_table[index] = NULL;
48864 + kfree(match);
48865 + }
48866 +
48867 + return;
48868 +}
48869 +
48870 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
48871 + __u16 sport, __u16 dport)
48872 +{
48873 + struct conn_table_entry *match;
48874 + unsigned int index;
48875 +
48876 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
48877 +
48878 + match = gr_conn_table[index];
48879 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
48880 + match = match->next;
48881 +
48882 + if (match)
48883 + return match->sig;
48884 + else
48885 + return NULL;
48886 +}
48887 +
48888 +#endif
48889 +
48890 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
48891 +{
48892 +#ifdef CONFIG_GRKERNSEC
48893 + struct signal_struct *sig = task->signal;
48894 + struct conn_table_entry *newent;
48895 +
48896 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
48897 + if (newent == NULL)
48898 + return;
48899 + /* no bh lock needed since we are called with bh disabled */
48900 + spin_lock(&gr_conn_table_lock);
48901 + gr_del_task_from_ip_table_nolock(sig);
48902 + sig->gr_saddr = inet->inet_rcv_saddr;
48903 + sig->gr_daddr = inet->inet_daddr;
48904 + sig->gr_sport = inet->inet_sport;
48905 + sig->gr_dport = inet->inet_dport;
48906 + gr_add_to_task_ip_table_nolock(sig, newent);
48907 + spin_unlock(&gr_conn_table_lock);
48908 +#endif
48909 + return;
48910 +}
48911 +
48912 +void gr_del_task_from_ip_table(struct task_struct *task)
48913 +{
48914 +#ifdef CONFIG_GRKERNSEC
48915 + spin_lock_bh(&gr_conn_table_lock);
48916 + gr_del_task_from_ip_table_nolock(task->signal);
48917 + spin_unlock_bh(&gr_conn_table_lock);
48918 +#endif
48919 + return;
48920 +}
48921 +
48922 +void
48923 +gr_attach_curr_ip(const struct sock *sk)
48924 +{
48925 +#ifdef CONFIG_GRKERNSEC
48926 + struct signal_struct *p, *set;
48927 + const struct inet_sock *inet = inet_sk(sk);
48928 +
48929 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
48930 + return;
48931 +
48932 + set = current->signal;
48933 +
48934 + spin_lock_bh(&gr_conn_table_lock);
48935 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
48936 + inet->inet_dport, inet->inet_sport);
48937 + if (unlikely(p != NULL)) {
48938 + set->curr_ip = p->curr_ip;
48939 + set->used_accept = 1;
48940 + gr_del_task_from_ip_table_nolock(p);
48941 + spin_unlock_bh(&gr_conn_table_lock);
48942 + return;
48943 + }
48944 + spin_unlock_bh(&gr_conn_table_lock);
48945 +
48946 + set->curr_ip = inet->inet_daddr;
48947 + set->used_accept = 1;
48948 +#endif
48949 + return;
48950 +}
48951 +
48952 +int
48953 +gr_handle_sock_all(const int family, const int type, const int protocol)
48954 +{
48955 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48956 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
48957 + (family != AF_UNIX)) {
48958 + if (family == AF_INET)
48959 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
48960 + else
48961 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
48962 + return -EACCES;
48963 + }
48964 +#endif
48965 + return 0;
48966 +}
48967 +
48968 +int
48969 +gr_handle_sock_server(const struct sockaddr *sck)
48970 +{
48971 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48972 + if (grsec_enable_socket_server &&
48973 + in_group_p(grsec_socket_server_gid) &&
48974 + sck && (sck->sa_family != AF_UNIX) &&
48975 + (sck->sa_family != AF_LOCAL)) {
48976 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
48977 + return -EACCES;
48978 + }
48979 +#endif
48980 + return 0;
48981 +}
48982 +
48983 +int
48984 +gr_handle_sock_server_other(const struct sock *sck)
48985 +{
48986 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48987 + if (grsec_enable_socket_server &&
48988 + in_group_p(grsec_socket_server_gid) &&
48989 + sck && (sck->sk_family != AF_UNIX) &&
48990 + (sck->sk_family != AF_LOCAL)) {
48991 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
48992 + return -EACCES;
48993 + }
48994 +#endif
48995 + return 0;
48996 +}
48997 +
48998 +int
48999 +gr_handle_sock_client(const struct sockaddr *sck)
49000 +{
49001 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49002 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49003 + sck && (sck->sa_family != AF_UNIX) &&
49004 + (sck->sa_family != AF_LOCAL)) {
49005 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49006 + return -EACCES;
49007 + }
49008 +#endif
49009 + return 0;
49010 +}
49011 diff -urNp linux-3.0.3/grsecurity/grsec_sysctl.c linux-3.0.3/grsecurity/grsec_sysctl.c
49012 --- linux-3.0.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49013 +++ linux-3.0.3/grsecurity/grsec_sysctl.c 2011-08-23 21:48:14.000000000 -0400
49014 @@ -0,0 +1,442 @@
49015 +#include <linux/kernel.h>
49016 +#include <linux/sched.h>
49017 +#include <linux/sysctl.h>
49018 +#include <linux/grsecurity.h>
49019 +#include <linux/grinternal.h>
49020 +
49021 +int
49022 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49023 +{
49024 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49025 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49026 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49027 + return -EACCES;
49028 + }
49029 +#endif
49030 + return 0;
49031 +}
49032 +
49033 +#ifdef CONFIG_GRKERNSEC_ROFS
49034 +static int __maybe_unused one = 1;
49035 +#endif
49036 +
49037 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49038 +struct ctl_table grsecurity_table[] = {
49039 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49040 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49041 +#ifdef CONFIG_GRKERNSEC_IO
49042 + {
49043 + .procname = "disable_priv_io",
49044 + .data = &grsec_disable_privio,
49045 + .maxlen = sizeof(int),
49046 + .mode = 0600,
49047 + .proc_handler = &proc_dointvec,
49048 + },
49049 +#endif
49050 +#endif
49051 +#ifdef CONFIG_GRKERNSEC_LINK
49052 + {
49053 + .procname = "linking_restrictions",
49054 + .data = &grsec_enable_link,
49055 + .maxlen = sizeof(int),
49056 + .mode = 0600,
49057 + .proc_handler = &proc_dointvec,
49058 + },
49059 +#endif
49060 +#ifdef CONFIG_GRKERNSEC_BRUTE
49061 + {
49062 + .procname = "deter_bruteforce",
49063 + .data = &grsec_enable_brute,
49064 + .maxlen = sizeof(int),
49065 + .mode = 0600,
49066 + .proc_handler = &proc_dointvec,
49067 + },
49068 +#endif
49069 +#ifdef CONFIG_GRKERNSEC_FIFO
49070 + {
49071 + .procname = "fifo_restrictions",
49072 + .data = &grsec_enable_fifo,
49073 + .maxlen = sizeof(int),
49074 + .mode = 0600,
49075 + .proc_handler = &proc_dointvec,
49076 + },
49077 +#endif
49078 +#ifdef CONFIG_GRKERNSEC_EXECVE
49079 + {
49080 + .procname = "execve_limiting",
49081 + .data = &grsec_enable_execve,
49082 + .maxlen = sizeof(int),
49083 + .mode = 0600,
49084 + .proc_handler = &proc_dointvec,
49085 + },
49086 +#endif
49087 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49088 + {
49089 + .procname = "ip_blackhole",
49090 + .data = &grsec_enable_blackhole,
49091 + .maxlen = sizeof(int),
49092 + .mode = 0600,
49093 + .proc_handler = &proc_dointvec,
49094 + },
49095 + {
49096 + .procname = "lastack_retries",
49097 + .data = &grsec_lastack_retries,
49098 + .maxlen = sizeof(int),
49099 + .mode = 0600,
49100 + .proc_handler = &proc_dointvec,
49101 + },
49102 +#endif
49103 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49104 + {
49105 + .procname = "exec_logging",
49106 + .data = &grsec_enable_execlog,
49107 + .maxlen = sizeof(int),
49108 + .mode = 0600,
49109 + .proc_handler = &proc_dointvec,
49110 + },
49111 +#endif
49112 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49113 + {
49114 + .procname = "rwxmap_logging",
49115 + .data = &grsec_enable_log_rwxmaps,
49116 + .maxlen = sizeof(int),
49117 + .mode = 0600,
49118 + .proc_handler = &proc_dointvec,
49119 + },
49120 +#endif
49121 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49122 + {
49123 + .procname = "signal_logging",
49124 + .data = &grsec_enable_signal,
49125 + .maxlen = sizeof(int),
49126 + .mode = 0600,
49127 + .proc_handler = &proc_dointvec,
49128 + },
49129 +#endif
49130 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49131 + {
49132 + .procname = "forkfail_logging",
49133 + .data = &grsec_enable_forkfail,
49134 + .maxlen = sizeof(int),
49135 + .mode = 0600,
49136 + .proc_handler = &proc_dointvec,
49137 + },
49138 +#endif
49139 +#ifdef CONFIG_GRKERNSEC_TIME
49140 + {
49141 + .procname = "timechange_logging",
49142 + .data = &grsec_enable_time,
49143 + .maxlen = sizeof(int),
49144 + .mode = 0600,
49145 + .proc_handler = &proc_dointvec,
49146 + },
49147 +#endif
49148 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49149 + {
49150 + .procname = "chroot_deny_shmat",
49151 + .data = &grsec_enable_chroot_shmat,
49152 + .maxlen = sizeof(int),
49153 + .mode = 0600,
49154 + .proc_handler = &proc_dointvec,
49155 + },
49156 +#endif
49157 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49158 + {
49159 + .procname = "chroot_deny_unix",
49160 + .data = &grsec_enable_chroot_unix,
49161 + .maxlen = sizeof(int),
49162 + .mode = 0600,
49163 + .proc_handler = &proc_dointvec,
49164 + },
49165 +#endif
49166 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49167 + {
49168 + .procname = "chroot_deny_mount",
49169 + .data = &grsec_enable_chroot_mount,
49170 + .maxlen = sizeof(int),
49171 + .mode = 0600,
49172 + .proc_handler = &proc_dointvec,
49173 + },
49174 +#endif
49175 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49176 + {
49177 + .procname = "chroot_deny_fchdir",
49178 + .data = &grsec_enable_chroot_fchdir,
49179 + .maxlen = sizeof(int),
49180 + .mode = 0600,
49181 + .proc_handler = &proc_dointvec,
49182 + },
49183 +#endif
49184 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49185 + {
49186 + .procname = "chroot_deny_chroot",
49187 + .data = &grsec_enable_chroot_double,
49188 + .maxlen = sizeof(int),
49189 + .mode = 0600,
49190 + .proc_handler = &proc_dointvec,
49191 + },
49192 +#endif
49193 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49194 + {
49195 + .procname = "chroot_deny_pivot",
49196 + .data = &grsec_enable_chroot_pivot,
49197 + .maxlen = sizeof(int),
49198 + .mode = 0600,
49199 + .proc_handler = &proc_dointvec,
49200 + },
49201 +#endif
49202 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49203 + {
49204 + .procname = "chroot_enforce_chdir",
49205 + .data = &grsec_enable_chroot_chdir,
49206 + .maxlen = sizeof(int),
49207 + .mode = 0600,
49208 + .proc_handler = &proc_dointvec,
49209 + },
49210 +#endif
49211 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49212 + {
49213 + .procname = "chroot_deny_chmod",
49214 + .data = &grsec_enable_chroot_chmod,
49215 + .maxlen = sizeof(int),
49216 + .mode = 0600,
49217 + .proc_handler = &proc_dointvec,
49218 + },
49219 +#endif
49220 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49221 + {
49222 + .procname = "chroot_deny_mknod",
49223 + .data = &grsec_enable_chroot_mknod,
49224 + .maxlen = sizeof(int),
49225 + .mode = 0600,
49226 + .proc_handler = &proc_dointvec,
49227 + },
49228 +#endif
49229 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49230 + {
49231 + .procname = "chroot_restrict_nice",
49232 + .data = &grsec_enable_chroot_nice,
49233 + .maxlen = sizeof(int),
49234 + .mode = 0600,
49235 + .proc_handler = &proc_dointvec,
49236 + },
49237 +#endif
49238 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49239 + {
49240 + .procname = "chroot_execlog",
49241 + .data = &grsec_enable_chroot_execlog,
49242 + .maxlen = sizeof(int),
49243 + .mode = 0600,
49244 + .proc_handler = &proc_dointvec,
49245 + },
49246 +#endif
49247 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49248 + {
49249 + .procname = "chroot_caps",
49250 + .data = &grsec_enable_chroot_caps,
49251 + .maxlen = sizeof(int),
49252 + .mode = 0600,
49253 + .proc_handler = &proc_dointvec,
49254 + },
49255 +#endif
49256 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49257 + {
49258 + .procname = "chroot_deny_sysctl",
49259 + .data = &grsec_enable_chroot_sysctl,
49260 + .maxlen = sizeof(int),
49261 + .mode = 0600,
49262 + .proc_handler = &proc_dointvec,
49263 + },
49264 +#endif
49265 +#ifdef CONFIG_GRKERNSEC_TPE
49266 + {
49267 + .procname = "tpe",
49268 + .data = &grsec_enable_tpe,
49269 + .maxlen = sizeof(int),
49270 + .mode = 0600,
49271 + .proc_handler = &proc_dointvec,
49272 + },
49273 + {
49274 + .procname = "tpe_gid",
49275 + .data = &grsec_tpe_gid,
49276 + .maxlen = sizeof(int),
49277 + .mode = 0600,
49278 + .proc_handler = &proc_dointvec,
49279 + },
49280 +#endif
49281 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49282 + {
49283 + .procname = "tpe_invert",
49284 + .data = &grsec_enable_tpe_invert,
49285 + .maxlen = sizeof(int),
49286 + .mode = 0600,
49287 + .proc_handler = &proc_dointvec,
49288 + },
49289 +#endif
49290 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49291 + {
49292 + .procname = "tpe_restrict_all",
49293 + .data = &grsec_enable_tpe_all,
49294 + .maxlen = sizeof(int),
49295 + .mode = 0600,
49296 + .proc_handler = &proc_dointvec,
49297 + },
49298 +#endif
49299 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49300 + {
49301 + .procname = "socket_all",
49302 + .data = &grsec_enable_socket_all,
49303 + .maxlen = sizeof(int),
49304 + .mode = 0600,
49305 + .proc_handler = &proc_dointvec,
49306 + },
49307 + {
49308 + .procname = "socket_all_gid",
49309 + .data = &grsec_socket_all_gid,
49310 + .maxlen = sizeof(int),
49311 + .mode = 0600,
49312 + .proc_handler = &proc_dointvec,
49313 + },
49314 +#endif
49315 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49316 + {
49317 + .procname = "socket_client",
49318 + .data = &grsec_enable_socket_client,
49319 + .maxlen = sizeof(int),
49320 + .mode = 0600,
49321 + .proc_handler = &proc_dointvec,
49322 + },
49323 + {
49324 + .procname = "socket_client_gid",
49325 + .data = &grsec_socket_client_gid,
49326 + .maxlen = sizeof(int),
49327 + .mode = 0600,
49328 + .proc_handler = &proc_dointvec,
49329 + },
49330 +#endif
49331 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49332 + {
49333 + .procname = "socket_server",
49334 + .data = &grsec_enable_socket_server,
49335 + .maxlen = sizeof(int),
49336 + .mode = 0600,
49337 + .proc_handler = &proc_dointvec,
49338 + },
49339 + {
49340 + .procname = "socket_server_gid",
49341 + .data = &grsec_socket_server_gid,
49342 + .maxlen = sizeof(int),
49343 + .mode = 0600,
49344 + .proc_handler = &proc_dointvec,
49345 + },
49346 +#endif
49347 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49348 + {
49349 + .procname = "audit_group",
49350 + .data = &grsec_enable_group,
49351 + .maxlen = sizeof(int),
49352 + .mode = 0600,
49353 + .proc_handler = &proc_dointvec,
49354 + },
49355 + {
49356 + .procname = "audit_gid",
49357 + .data = &grsec_audit_gid,
49358 + .maxlen = sizeof(int),
49359 + .mode = 0600,
49360 + .proc_handler = &proc_dointvec,
49361 + },
49362 +#endif
49363 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49364 + {
49365 + .procname = "audit_chdir",
49366 + .data = &grsec_enable_chdir,
49367 + .maxlen = sizeof(int),
49368 + .mode = 0600,
49369 + .proc_handler = &proc_dointvec,
49370 + },
49371 +#endif
49372 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49373 + {
49374 + .procname = "audit_mount",
49375 + .data = &grsec_enable_mount,
49376 + .maxlen = sizeof(int),
49377 + .mode = 0600,
49378 + .proc_handler = &proc_dointvec,
49379 + },
49380 +#endif
49381 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49382 + {
49383 + .procname = "audit_textrel",
49384 + .data = &grsec_enable_audit_textrel,
49385 + .maxlen = sizeof(int),
49386 + .mode = 0600,
49387 + .proc_handler = &proc_dointvec,
49388 + },
49389 +#endif
49390 +#ifdef CONFIG_GRKERNSEC_DMESG
49391 + {
49392 + .procname = "dmesg",
49393 + .data = &grsec_enable_dmesg,
49394 + .maxlen = sizeof(int),
49395 + .mode = 0600,
49396 + .proc_handler = &proc_dointvec,
49397 + },
49398 +#endif
49399 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49400 + {
49401 + .procname = "chroot_findtask",
49402 + .data = &grsec_enable_chroot_findtask,
49403 + .maxlen = sizeof(int),
49404 + .mode = 0600,
49405 + .proc_handler = &proc_dointvec,
49406 + },
49407 +#endif
49408 +#ifdef CONFIG_GRKERNSEC_RESLOG
49409 + {
49410 + .procname = "resource_logging",
49411 + .data = &grsec_resource_logging,
49412 + .maxlen = sizeof(int),
49413 + .mode = 0600,
49414 + .proc_handler = &proc_dointvec,
49415 + },
49416 +#endif
49417 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49418 + {
49419 + .procname = "audit_ptrace",
49420 + .data = &grsec_enable_audit_ptrace,
49421 + .maxlen = sizeof(int),
49422 + .mode = 0600,
49423 + .proc_handler = &proc_dointvec,
49424 + },
49425 +#endif
49426 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49427 + {
49428 + .procname = "harden_ptrace",
49429 + .data = &grsec_enable_harden_ptrace,
49430 + .maxlen = sizeof(int),
49431 + .mode = 0600,
49432 + .proc_handler = &proc_dointvec,
49433 + },
49434 +#endif
49435 + {
49436 + .procname = "grsec_lock",
49437 + .data = &grsec_lock,
49438 + .maxlen = sizeof(int),
49439 + .mode = 0600,
49440 + .proc_handler = &proc_dointvec,
49441 + },
49442 +#endif
49443 +#ifdef CONFIG_GRKERNSEC_ROFS
49444 + {
49445 + .procname = "romount_protect",
49446 + .data = &grsec_enable_rofs,
49447 + .maxlen = sizeof(int),
49448 + .mode = 0600,
49449 + .proc_handler = &proc_dointvec_minmax,
49450 + .extra1 = &one,
49451 + .extra2 = &one,
49452 + },
49453 +#endif
49454 + { }
49455 +};
49456 +#endif
49457 diff -urNp linux-3.0.3/grsecurity/grsec_time.c linux-3.0.3/grsecurity/grsec_time.c
49458 --- linux-3.0.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49459 +++ linux-3.0.3/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49460 @@ -0,0 +1,16 @@
49461 +#include <linux/kernel.h>
49462 +#include <linux/sched.h>
49463 +#include <linux/grinternal.h>
49464 +#include <linux/module.h>
49465 +
49466 +void
49467 +gr_log_timechange(void)
49468 +{
49469 +#ifdef CONFIG_GRKERNSEC_TIME
49470 + if (grsec_enable_time)
49471 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49472 +#endif
49473 + return;
49474 +}
49475 +
49476 +EXPORT_SYMBOL(gr_log_timechange);
49477 diff -urNp linux-3.0.3/grsecurity/grsec_tpe.c linux-3.0.3/grsecurity/grsec_tpe.c
49478 --- linux-3.0.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49479 +++ linux-3.0.3/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49480 @@ -0,0 +1,39 @@
49481 +#include <linux/kernel.h>
49482 +#include <linux/sched.h>
49483 +#include <linux/file.h>
49484 +#include <linux/fs.h>
49485 +#include <linux/grinternal.h>
49486 +
49487 +extern int gr_acl_tpe_check(void);
49488 +
49489 +int
49490 +gr_tpe_allow(const struct file *file)
49491 +{
49492 +#ifdef CONFIG_GRKERNSEC
49493 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49494 + const struct cred *cred = current_cred();
49495 +
49496 + if (cred->uid && ((grsec_enable_tpe &&
49497 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49498 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49499 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49500 +#else
49501 + in_group_p(grsec_tpe_gid)
49502 +#endif
49503 + ) || gr_acl_tpe_check()) &&
49504 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49505 + (inode->i_mode & S_IWOTH))))) {
49506 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49507 + return 0;
49508 + }
49509 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49510 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49511 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49512 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49513 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49514 + return 0;
49515 + }
49516 +#endif
49517 +#endif
49518 + return 1;
49519 +}
49520 diff -urNp linux-3.0.3/grsecurity/grsum.c linux-3.0.3/grsecurity/grsum.c
49521 --- linux-3.0.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49522 +++ linux-3.0.3/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49523 @@ -0,0 +1,61 @@
49524 +#include <linux/err.h>
49525 +#include <linux/kernel.h>
49526 +#include <linux/sched.h>
49527 +#include <linux/mm.h>
49528 +#include <linux/scatterlist.h>
49529 +#include <linux/crypto.h>
49530 +#include <linux/gracl.h>
49531 +
49532 +
49533 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49534 +#error "crypto and sha256 must be built into the kernel"
49535 +#endif
49536 +
49537 +int
49538 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49539 +{
49540 + char *p;
49541 + struct crypto_hash *tfm;
49542 + struct hash_desc desc;
49543 + struct scatterlist sg;
49544 + unsigned char temp_sum[GR_SHA_LEN];
49545 + volatile int retval = 0;
49546 + volatile int dummy = 0;
49547 + unsigned int i;
49548 +
49549 + sg_init_table(&sg, 1);
49550 +
49551 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49552 + if (IS_ERR(tfm)) {
49553 + /* should never happen, since sha256 should be built in */
49554 + return 1;
49555 + }
49556 +
49557 + desc.tfm = tfm;
49558 + desc.flags = 0;
49559 +
49560 + crypto_hash_init(&desc);
49561 +
49562 + p = salt;
49563 + sg_set_buf(&sg, p, GR_SALT_LEN);
49564 + crypto_hash_update(&desc, &sg, sg.length);
49565 +
49566 + p = entry->pw;
49567 + sg_set_buf(&sg, p, strlen(p));
49568 +
49569 + crypto_hash_update(&desc, &sg, sg.length);
49570 +
49571 + crypto_hash_final(&desc, temp_sum);
49572 +
49573 + memset(entry->pw, 0, GR_PW_LEN);
49574 +
49575 + for (i = 0; i < GR_SHA_LEN; i++)
49576 + if (sum[i] != temp_sum[i])
49577 + retval = 1;
49578 + else
49579 + dummy = 1; // waste a cycle
49580 +
49581 + crypto_free_hash(tfm);
49582 +
49583 + return retval;
49584 +}
49585 diff -urNp linux-3.0.3/grsecurity/Kconfig linux-3.0.3/grsecurity/Kconfig
49586 --- linux-3.0.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49587 +++ linux-3.0.3/grsecurity/Kconfig 2011-08-23 21:48:14.000000000 -0400
49588 @@ -0,0 +1,1050 @@
49589 +#
49590 +# grecurity configuration
49591 +#
49592 +
49593 +menu "Grsecurity"
49594 +
49595 +config GRKERNSEC
49596 + bool "Grsecurity"
49597 + select CRYPTO
49598 + select CRYPTO_SHA256
49599 + help
49600 + If you say Y here, you will be able to configure many features
49601 + that will enhance the security of your system. It is highly
49602 + recommended that you say Y here and read through the help
49603 + for each option so that you fully understand the features and
49604 + can evaluate their usefulness for your machine.
49605 +
49606 +choice
49607 + prompt "Security Level"
49608 + depends on GRKERNSEC
49609 + default GRKERNSEC_CUSTOM
49610 +
49611 +config GRKERNSEC_LOW
49612 + bool "Low"
49613 + select GRKERNSEC_LINK
49614 + select GRKERNSEC_FIFO
49615 + select GRKERNSEC_EXECVE
49616 + select GRKERNSEC_RANDNET
49617 + select GRKERNSEC_DMESG
49618 + select GRKERNSEC_CHROOT
49619 + select GRKERNSEC_CHROOT_CHDIR
49620 +
49621 + help
49622 + If you choose this option, several of the grsecurity options will
49623 + be enabled that will give you greater protection against a number
49624 + of attacks, while assuring that none of your software will have any
49625 + conflicts with the additional security measures. If you run a lot
49626 + of unusual software, or you are having problems with the higher
49627 + security levels, you should say Y here. With this option, the
49628 + following features are enabled:
49629 +
49630 + - Linking restrictions
49631 + - FIFO restrictions
49632 + - Enforcing RLIMIT_NPROC on execve
49633 + - Restricted dmesg
49634 + - Enforced chdir("/") on chroot
49635 + - Runtime module disabling
49636 +
49637 +config GRKERNSEC_MEDIUM
49638 + bool "Medium"
49639 + select PAX
49640 + select PAX_EI_PAX
49641 + select PAX_PT_PAX_FLAGS
49642 + select PAX_HAVE_ACL_FLAGS
49643 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49644 + select GRKERNSEC_CHROOT
49645 + select GRKERNSEC_CHROOT_SYSCTL
49646 + select GRKERNSEC_LINK
49647 + select GRKERNSEC_FIFO
49648 + select GRKERNSEC_EXECVE
49649 + select GRKERNSEC_DMESG
49650 + select GRKERNSEC_RANDNET
49651 + select GRKERNSEC_FORKFAIL
49652 + select GRKERNSEC_TIME
49653 + select GRKERNSEC_SIGNAL
49654 + select GRKERNSEC_CHROOT
49655 + select GRKERNSEC_CHROOT_UNIX
49656 + select GRKERNSEC_CHROOT_MOUNT
49657 + select GRKERNSEC_CHROOT_PIVOT
49658 + select GRKERNSEC_CHROOT_DOUBLE
49659 + select GRKERNSEC_CHROOT_CHDIR
49660 + select GRKERNSEC_CHROOT_MKNOD
49661 + select GRKERNSEC_PROC
49662 + select GRKERNSEC_PROC_USERGROUP
49663 + select PAX_RANDUSTACK
49664 + select PAX_ASLR
49665 + select PAX_RANDMMAP
49666 + select PAX_REFCOUNT if (X86 || SPARC64)
49667 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49668 +
49669 + help
49670 + If you say Y here, several features in addition to those included
49671 + in the low additional security level will be enabled. These
49672 + features provide even more security to your system, though in rare
49673 + cases they may be incompatible with very old or poorly written
49674 + software. If you enable this option, make sure that your auth
49675 + service (identd) is running as gid 1001. With this option,
49676 + the following features (in addition to those provided in the
49677 + low additional security level) will be enabled:
49678 +
49679 + - Failed fork logging
49680 + - Time change logging
49681 + - Signal logging
49682 + - Deny mounts in chroot
49683 + - Deny double chrooting
49684 + - Deny sysctl writes in chroot
49685 + - Deny mknod in chroot
49686 + - Deny access to abstract AF_UNIX sockets out of chroot
49687 + - Deny pivot_root in chroot
49688 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49689 + - /proc restrictions with special GID set to 10 (usually wheel)
49690 + - Address Space Layout Randomization (ASLR)
49691 + - Prevent exploitation of most refcount overflows
49692 + - Bounds checking of copying between the kernel and userland
49693 +
49694 +config GRKERNSEC_HIGH
49695 + bool "High"
49696 + select GRKERNSEC_LINK
49697 + select GRKERNSEC_FIFO
49698 + select GRKERNSEC_EXECVE
49699 + select GRKERNSEC_DMESG
49700 + select GRKERNSEC_FORKFAIL
49701 + select GRKERNSEC_TIME
49702 + select GRKERNSEC_SIGNAL
49703 + select GRKERNSEC_CHROOT
49704 + select GRKERNSEC_CHROOT_SHMAT
49705 + select GRKERNSEC_CHROOT_UNIX
49706 + select GRKERNSEC_CHROOT_MOUNT
49707 + select GRKERNSEC_CHROOT_FCHDIR
49708 + select GRKERNSEC_CHROOT_PIVOT
49709 + select GRKERNSEC_CHROOT_DOUBLE
49710 + select GRKERNSEC_CHROOT_CHDIR
49711 + select GRKERNSEC_CHROOT_MKNOD
49712 + select GRKERNSEC_CHROOT_CAPS
49713 + select GRKERNSEC_CHROOT_SYSCTL
49714 + select GRKERNSEC_CHROOT_FINDTASK
49715 + select GRKERNSEC_SYSFS_RESTRICT
49716 + select GRKERNSEC_PROC
49717 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49718 + select GRKERNSEC_HIDESYM
49719 + select GRKERNSEC_BRUTE
49720 + select GRKERNSEC_PROC_USERGROUP
49721 + select GRKERNSEC_KMEM
49722 + select GRKERNSEC_RESLOG
49723 + select GRKERNSEC_RANDNET
49724 + select GRKERNSEC_PROC_ADD
49725 + select GRKERNSEC_CHROOT_CHMOD
49726 + select GRKERNSEC_CHROOT_NICE
49727 + select GRKERNSEC_AUDIT_MOUNT
49728 + select GRKERNSEC_MODHARDEN if (MODULES)
49729 + select GRKERNSEC_HARDEN_PTRACE
49730 + select GRKERNSEC_VM86 if (X86_32)
49731 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49732 + select PAX
49733 + select PAX_RANDUSTACK
49734 + select PAX_ASLR
49735 + select PAX_RANDMMAP
49736 + select PAX_NOEXEC
49737 + select PAX_MPROTECT
49738 + select PAX_EI_PAX
49739 + select PAX_PT_PAX_FLAGS
49740 + select PAX_HAVE_ACL_FLAGS
49741 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49742 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49743 + select PAX_RANDKSTACK if (X86_TSC && X86)
49744 + select PAX_SEGMEXEC if (X86_32)
49745 + select PAX_PAGEEXEC
49746 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49747 + select PAX_EMUTRAMP if (PARISC)
49748 + select PAX_EMUSIGRT if (PARISC)
49749 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49750 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49751 + select PAX_REFCOUNT if (X86 || SPARC64)
49752 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49753 + help
49754 + If you say Y here, many of the features of grsecurity will be
49755 + enabled, which will protect you against many kinds of attacks
49756 + against your system. The heightened security comes at a cost
49757 + of an increased chance of incompatibilities with rare software
49758 + on your machine. Since this security level enables PaX, you should
49759 + view <http://pax.grsecurity.net> and read about the PaX
49760 + project. While you are there, download chpax and run it on
49761 + binaries that cause problems with PaX. Also remember that
49762 + since the /proc restrictions are enabled, you must run your
49763 + identd as gid 1001. This security level enables the following
49764 + features in addition to those listed in the low and medium
49765 + security levels:
49766 +
49767 + - Additional /proc restrictions
49768 + - Chmod restrictions in chroot
49769 + - No signals, ptrace, or viewing of processes outside of chroot
49770 + - Capability restrictions in chroot
49771 + - Deny fchdir out of chroot
49772 + - Priority restrictions in chroot
49773 + - Segmentation-based implementation of PaX
49774 + - Mprotect restrictions
49775 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49776 + - Kernel stack randomization
49777 + - Mount/unmount/remount logging
49778 + - Kernel symbol hiding
49779 + - Prevention of memory exhaustion-based exploits
49780 + - Hardening of module auto-loading
49781 + - Ptrace restrictions
49782 + - Restricted vm86 mode
49783 + - Restricted sysfs/debugfs
49784 + - Active kernel exploit response
49785 +
49786 +config GRKERNSEC_CUSTOM
49787 + bool "Custom"
49788 + help
49789 + If you say Y here, you will be able to configure every grsecurity
49790 + option, which allows you to enable many more features that aren't
49791 + covered in the basic security levels. These additional features
49792 + include TPE, socket restrictions, and the sysctl system for
49793 + grsecurity. It is advised that you read through the help for
49794 + each option to determine its usefulness in your situation.
49795 +
49796 +endchoice
49797 +
49798 +menu "Address Space Protection"
49799 +depends on GRKERNSEC
49800 +
49801 +config GRKERNSEC_KMEM
49802 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
49803 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49804 + help
49805 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49806 + be written to via mmap or otherwise to modify the running kernel.
49807 + /dev/port will also not be allowed to be opened. If you have module
49808 + support disabled, enabling this will close up four ways that are
49809 + currently used to insert malicious code into the running kernel.
49810 + Even with all these features enabled, we still highly recommend that
49811 + you use the RBAC system, as it is still possible for an attacker to
49812 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49813 + If you are not using XFree86, you may be able to stop this additional
49814 + case by enabling the 'Disable privileged I/O' option. Though nothing
49815 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49816 + but only to video memory, which is the only writing we allow in this
49817 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49818 + not be allowed to mprotect it with PROT_WRITE later.
49819 + It is highly recommended that you say Y here if you meet all the
49820 + conditions above.
49821 +
49822 +config GRKERNSEC_VM86
49823 + bool "Restrict VM86 mode"
49824 + depends on X86_32
49825 +
49826 + help
49827 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49828 + make use of a special execution mode on 32bit x86 processors called
49829 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49830 + video cards and will still work with this option enabled. The purpose
49831 + of the option is to prevent exploitation of emulation errors in
49832 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49833 + Nearly all users should be able to enable this option.
49834 +
49835 +config GRKERNSEC_IO
49836 + bool "Disable privileged I/O"
49837 + depends on X86
49838 + select RTC_CLASS
49839 + select RTC_INTF_DEV
49840 + select RTC_DRV_CMOS
49841 +
49842 + help
49843 + If you say Y here, all ioperm and iopl calls will return an error.
49844 + Ioperm and iopl can be used to modify the running kernel.
49845 + Unfortunately, some programs need this access to operate properly,
49846 + the most notable of which are XFree86 and hwclock. hwclock can be
49847 + remedied by having RTC support in the kernel, so real-time
49848 + clock support is enabled if this option is enabled, to ensure
49849 + that hwclock operates correctly. XFree86 still will not
49850 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49851 + IF YOU USE XFree86. If you use XFree86 and you still want to
49852 + protect your kernel against modification, use the RBAC system.
49853 +
49854 +config GRKERNSEC_PROC_MEMMAP
49855 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
49856 + default y if (PAX_NOEXEC || PAX_ASLR)
49857 + depends on PAX_NOEXEC || PAX_ASLR
49858 + help
49859 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49860 + give no information about the addresses of its mappings if
49861 + PaX features that rely on random addresses are enabled on the task.
49862 + If you use PaX it is greatly recommended that you say Y here as it
49863 + closes up a hole that makes the full ASLR useless for suid
49864 + binaries.
49865 +
49866 +config GRKERNSEC_BRUTE
49867 + bool "Deter exploit bruteforcing"
49868 + help
49869 + If you say Y here, attempts to bruteforce exploits against forking
49870 + daemons such as apache or sshd, as well as against suid/sgid binaries
49871 + will be deterred. When a child of a forking daemon is killed by PaX
49872 + or crashes due to an illegal instruction or other suspicious signal,
49873 + the parent process will be delayed 30 seconds upon every subsequent
49874 + fork until the administrator is able to assess the situation and
49875 + restart the daemon.
49876 + In the suid/sgid case, the attempt is logged, the user has all their
49877 + processes terminated, and they are prevented from executing any further
49878 + processes for 15 minutes.
49879 + It is recommended that you also enable signal logging in the auditing
49880 + section so that logs are generated when a process triggers a suspicious
49881 + signal.
49882 + If the sysctl option is enabled, a sysctl option with name
49883 + "deter_bruteforce" is created.
49884 +
49885 +
49886 +config GRKERNSEC_MODHARDEN
49887 + bool "Harden module auto-loading"
49888 + depends on MODULES
49889 + help
49890 + If you say Y here, module auto-loading in response to use of some
49891 + feature implemented by an unloaded module will be restricted to
49892 + root users. Enabling this option helps defend against attacks
49893 + by unprivileged users who abuse the auto-loading behavior to
49894 + cause a vulnerable module to load that is then exploited.
49895 +
49896 + If this option prevents a legitimate use of auto-loading for a
49897 + non-root user, the administrator can execute modprobe manually
49898 + with the exact name of the module mentioned in the alert log.
49899 + Alternatively, the administrator can add the module to the list
49900 + of modules loaded at boot by modifying init scripts.
49901 +
49902 + Modification of init scripts will most likely be needed on
49903 + Ubuntu servers with encrypted home directory support enabled,
49904 + as the first non-root user logging in will cause the ecb(aes),
49905 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49906 +
49907 +config GRKERNSEC_HIDESYM
49908 + bool "Hide kernel symbols"
49909 + help
49910 + If you say Y here, getting information on loaded modules, and
49911 + displaying all kernel symbols through a syscall will be restricted
49912 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49913 + /proc/kallsyms will be restricted to the root user. The RBAC
49914 + system can hide that entry even from root.
49915 +
49916 + This option also prevents leaking of kernel addresses through
49917 + several /proc entries.
49918 +
49919 + Note that this option is only effective provided the following
49920 + conditions are met:
49921 + 1) The kernel using grsecurity is not precompiled by some distribution
49922 + 2) You have also enabled GRKERNSEC_DMESG
49923 + 3) You are using the RBAC system and hiding other files such as your
49924 + kernel image and System.map. Alternatively, enabling this option
49925 + causes the permissions on /boot, /lib/modules, and the kernel
49926 + source directory to change at compile time to prevent
49927 + reading by non-root users.
49928 + If the above conditions are met, this option will aid in providing a
49929 + useful protection against local kernel exploitation of overflows
49930 + and arbitrary read/write vulnerabilities.
49931 +
49932 +config GRKERNSEC_KERN_LOCKOUT
49933 + bool "Active kernel exploit response"
49934 + depends on X86 || ARM || PPC || SPARC
49935 + help
49936 + If you say Y here, when a PaX alert is triggered due to suspicious
49937 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49938 + or an OOPs occurs due to bad memory accesses, instead of just
49939 + terminating the offending process (and potentially allowing
49940 + a subsequent exploit from the same user), we will take one of two
49941 + actions:
49942 + If the user was root, we will panic the system
49943 + If the user was non-root, we will log the attempt, terminate
49944 + all processes owned by the user, then prevent them from creating
49945 + any new processes until the system is restarted
49946 + This deters repeated kernel exploitation/bruteforcing attempts
49947 + and is useful for later forensics.
49948 +
49949 +endmenu
49950 +menu "Role Based Access Control Options"
49951 +depends on GRKERNSEC
49952 +
49953 +config GRKERNSEC_RBAC_DEBUG
49954 + bool
49955 +
49956 +config GRKERNSEC_NO_RBAC
49957 + bool "Disable RBAC system"
49958 + help
49959 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49960 + preventing the RBAC system from being enabled. You should only say Y
49961 + here if you have no intention of using the RBAC system, so as to prevent
49962 + an attacker with root access from misusing the RBAC system to hide files
49963 + and processes when loadable module support and /dev/[k]mem have been
49964 + locked down.
49965 +
49966 +config GRKERNSEC_ACL_HIDEKERN
49967 + bool "Hide kernel processes"
49968 + help
49969 + If you say Y here, all kernel threads will be hidden to all
49970 + processes but those whose subject has the "view hidden processes"
49971 + flag.
49972 +
49973 +config GRKERNSEC_ACL_MAXTRIES
49974 + int "Maximum tries before password lockout"
49975 + default 3
49976 + help
49977 + This option enforces the maximum number of times a user can attempt
49978 + to authorize themselves with the grsecurity RBAC system before being
49979 + denied the ability to attempt authorization again for a specified time.
49980 + The lower the number, the harder it will be to brute-force a password.
49981 +
49982 +config GRKERNSEC_ACL_TIMEOUT
49983 + int "Time to wait after max password tries, in seconds"
49984 + default 30
49985 + help
49986 + This option specifies the time the user must wait after attempting to
49987 + authorize to the RBAC system with the maximum number of invalid
49988 + passwords. The higher the number, the harder it will be to brute-force
49989 + a password.
49990 +
49991 +endmenu
49992 +menu "Filesystem Protections"
49993 +depends on GRKERNSEC
49994 +
49995 +config GRKERNSEC_PROC
49996 + bool "Proc restrictions"
49997 + help
49998 + If you say Y here, the permissions of the /proc filesystem
49999 + will be altered to enhance system security and privacy. You MUST
50000 + choose either a user only restriction or a user and group restriction.
50001 + Depending upon the option you choose, you can either restrict users to
50002 + see only the processes they themselves run, or choose a group that can
50003 + view all processes and files normally restricted to root if you choose
50004 + the "restrict to user only" option. NOTE: If you're running identd as
50005 + a non-root user, you will have to run it as the group you specify here.
50006 +
50007 +config GRKERNSEC_PROC_USER
50008 + bool "Restrict /proc to user only"
50009 + depends on GRKERNSEC_PROC
50010 + help
50011 + If you say Y here, non-root users will only be able to view their own
50012 + processes, and restricts them from viewing network-related information,
50013 + and viewing kernel symbol and module information.
50014 +
50015 +config GRKERNSEC_PROC_USERGROUP
50016 + bool "Allow special group"
50017 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50018 + help
50019 + If you say Y here, you will be able to select a group that will be
50020 + able to view all processes and network-related information. If you've
50021 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50022 + remain hidden. This option is useful if you want to run identd as
50023 + a non-root user.
50024 +
50025 +config GRKERNSEC_PROC_GID
50026 + int "GID for special group"
50027 + depends on GRKERNSEC_PROC_USERGROUP
50028 + default 1001
50029 +
50030 +config GRKERNSEC_PROC_ADD
50031 + bool "Additional restrictions"
50032 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50033 + help
50034 + If you say Y here, additional restrictions will be placed on
50035 + /proc that keep normal users from viewing device information and
50036 + slabinfo information that could be useful for exploits.
50037 +
50038 +config GRKERNSEC_LINK
50039 + bool "Linking restrictions"
50040 + help
50041 + If you say Y here, /tmp race exploits will be prevented, since users
50042 + will no longer be able to follow symlinks owned by other users in
50043 + world-writable +t directories (e.g. /tmp), unless the owner of the
50044 + symlink is the owner of the directory. users will also not be
50045 + able to hardlink to files they do not own. If the sysctl option is
50046 + enabled, a sysctl option with name "linking_restrictions" is created.
50047 +
50048 +config GRKERNSEC_FIFO
50049 + bool "FIFO restrictions"
50050 + help
50051 + If you say Y here, users will not be able to write to FIFOs they don't
50052 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50053 + the FIFO is the same owner of the directory it's held in. If the sysctl
50054 + option is enabled, a sysctl option with name "fifo_restrictions" is
50055 + created.
50056 +
50057 +config GRKERNSEC_SYSFS_RESTRICT
50058 + bool "Sysfs/debugfs restriction"
50059 + depends on SYSFS
50060 + help
50061 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50062 + any filesystem normally mounted under it (e.g. debugfs) will only
50063 + be accessible by root. These filesystems generally provide access
50064 + to hardware and debug information that isn't appropriate for unprivileged
50065 + users of the system. Sysfs and debugfs have also become a large source
50066 + of new vulnerabilities, ranging from infoleaks to local compromise.
50067 + There has been very little oversight with an eye toward security involved
50068 + in adding new exporters of information to these filesystems, so their
50069 + use is discouraged.
50070 + This option is equivalent to a chmod 0700 of the mount paths.
50071 +
50072 +config GRKERNSEC_ROFS
50073 + bool "Runtime read-only mount protection"
50074 + help
50075 + If you say Y here, a sysctl option with name "romount_protect" will
50076 + be created. By setting this option to 1 at runtime, filesystems
50077 + will be protected in the following ways:
50078 + * No new writable mounts will be allowed
50079 + * Existing read-only mounts won't be able to be remounted read/write
50080 + * Write operations will be denied on all block devices
50081 + This option acts independently of grsec_lock: once it is set to 1,
50082 + it cannot be turned off. Therefore, please be mindful of the resulting
50083 + behavior if this option is enabled in an init script on a read-only
50084 + filesystem. This feature is mainly intended for secure embedded systems.
50085 +
50086 +config GRKERNSEC_CHROOT
50087 + bool "Chroot jail restrictions"
50088 + help
50089 + If you say Y here, you will be able to choose several options that will
50090 + make breaking out of a chrooted jail much more difficult. If you
50091 + encounter no software incompatibilities with the following options, it
50092 + is recommended that you enable each one.
50093 +
50094 +config GRKERNSEC_CHROOT_MOUNT
50095 + bool "Deny mounts"
50096 + depends on GRKERNSEC_CHROOT
50097 + help
50098 + If you say Y here, processes inside a chroot will not be able to
50099 + mount or remount filesystems. If the sysctl option is enabled, a
50100 + sysctl option with name "chroot_deny_mount" is created.
50101 +
50102 +config GRKERNSEC_CHROOT_DOUBLE
50103 + bool "Deny double-chroots"
50104 + depends on GRKERNSEC_CHROOT
50105 + help
50106 + If you say Y here, processes inside a chroot will not be able to chroot
50107 + again outside the chroot. This is a widely used method of breaking
50108 + out of a chroot jail and should not be allowed. If the sysctl
50109 + option is enabled, a sysctl option with name
50110 + "chroot_deny_chroot" is created.
50111 +
50112 +config GRKERNSEC_CHROOT_PIVOT
50113 + bool "Deny pivot_root in chroot"
50114 + depends on GRKERNSEC_CHROOT
50115 + help
50116 + If you say Y here, processes inside a chroot will not be able to use
50117 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50118 + works similar to chroot in that it changes the root filesystem. This
50119 + function could be misused in a chrooted process to attempt to break out
50120 + of the chroot, and therefore should not be allowed. If the sysctl
50121 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50122 + created.
50123 +
50124 +config GRKERNSEC_CHROOT_CHDIR
50125 + bool "Enforce chdir(\"/\") on all chroots"
50126 + depends on GRKERNSEC_CHROOT
50127 + help
50128 + If you say Y here, the current working directory of all newly-chrooted
50129 + applications will be set to the root directory of the chroot.
50130 + The man page on chroot(2) states:
50131 + Note that this call does not change the current working
50132 + directory, so that `.' can be outside the tree rooted at
50133 + `/'. In particular, the super-user can escape from a
50134 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50135 +
50136 + It is recommended that you say Y here, since it's not known to break
50137 + any software. If the sysctl option is enabled, a sysctl option with
50138 + name "chroot_enforce_chdir" is created.
50139 +
50140 +config GRKERNSEC_CHROOT_CHMOD
50141 + bool "Deny (f)chmod +s"
50142 + depends on GRKERNSEC_CHROOT
50143 + help
50144 + If you say Y here, processes inside a chroot will not be able to chmod
50145 + or fchmod files to make them have suid or sgid bits. This protects
50146 + against another published method of breaking a chroot. If the sysctl
50147 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50148 + created.
50149 +
50150 +config GRKERNSEC_CHROOT_FCHDIR
50151 + bool "Deny fchdir out of chroot"
50152 + depends on GRKERNSEC_CHROOT
50153 + help
50154 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50155 + to a file descriptor of the chrooting process that points to a directory
50156 + outside the filesystem will be stopped. If the sysctl option
50157 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50158 +
50159 +config GRKERNSEC_CHROOT_MKNOD
50160 + bool "Deny mknod"
50161 + depends on GRKERNSEC_CHROOT
50162 + help
50163 + If you say Y here, processes inside a chroot will not be allowed to
50164 + mknod. The problem with using mknod inside a chroot is that it
50165 + would allow an attacker to create a device entry that is the same
50166 + as one on the physical root of your system, which could range from
50167 + anything from the console device to a device for your harddrive (which
50168 + they could then use to wipe the drive or steal data). It is recommended
50169 + that you say Y here, unless you run into software incompatibilities.
50170 + If the sysctl option is enabled, a sysctl option with name
50171 + "chroot_deny_mknod" is created.
50172 +
50173 +config GRKERNSEC_CHROOT_SHMAT
50174 + bool "Deny shmat() out of chroot"
50175 + depends on GRKERNSEC_CHROOT
50176 + help
50177 + If you say Y here, processes inside a chroot will not be able to attach
50178 + to shared memory segments that were created outside of the chroot jail.
50179 + It is recommended that you say Y here. If the sysctl option is enabled,
50180 + a sysctl option with name "chroot_deny_shmat" is created.
50181 +
50182 +config GRKERNSEC_CHROOT_UNIX
50183 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50184 + depends on GRKERNSEC_CHROOT
50185 + help
50186 + If you say Y here, processes inside a chroot will not be able to
50187 + connect to abstract (meaning not belonging to a filesystem) Unix
50188 + domain sockets that were bound outside of a chroot. It is recommended
50189 + that you say Y here. If the sysctl option is enabled, a sysctl option
50190 + with name "chroot_deny_unix" is created.
50191 +
50192 +config GRKERNSEC_CHROOT_FINDTASK
50193 + bool "Protect outside processes"
50194 + depends on GRKERNSEC_CHROOT
50195 + help
50196 + If you say Y here, processes inside a chroot will not be able to
50197 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50198 + getsid, or view any process outside of the chroot. If the sysctl
50199 + option is enabled, a sysctl option with name "chroot_findtask" is
50200 + created.
50201 +
50202 +config GRKERNSEC_CHROOT_NICE
50203 + bool "Restrict priority changes"
50204 + depends on GRKERNSEC_CHROOT
50205 + help
50206 + If you say Y here, processes inside a chroot will not be able to raise
50207 + the priority of processes in the chroot, or alter the priority of
50208 + processes outside the chroot. This provides more security than simply
50209 + removing CAP_SYS_NICE from the process' capability set. If the
50210 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50211 + is created.
50212 +
50213 +config GRKERNSEC_CHROOT_SYSCTL
50214 + bool "Deny sysctl writes"
50215 + depends on GRKERNSEC_CHROOT
50216 + help
50217 + If you say Y here, an attacker in a chroot will not be able to
50218 + write to sysctl entries, either by sysctl(2) or through a /proc
50219 + interface. It is strongly recommended that you say Y here. If the
50220 + sysctl option is enabled, a sysctl option with name
50221 + "chroot_deny_sysctl" is created.
50222 +
50223 +config GRKERNSEC_CHROOT_CAPS
50224 + bool "Capability restrictions"
50225 + depends on GRKERNSEC_CHROOT
50226 + help
50227 + If you say Y here, the capabilities on all root processes within a
50228 + chroot jail will be lowered to stop module insertion, raw i/o,
50229 + system and net admin tasks, rebooting the system, modifying immutable
50230 + files, modifying IPC owned by another, and changing the system time.
50231 + This is left an option because it can break some apps. Disable this
50232 + if your chrooted apps are having problems performing those kinds of
50233 + tasks. If the sysctl option is enabled, a sysctl option with
50234 + name "chroot_caps" is created.
50235 +
50236 +endmenu
50237 +menu "Kernel Auditing"
50238 +depends on GRKERNSEC
50239 +
50240 +config GRKERNSEC_AUDIT_GROUP
50241 + bool "Single group for auditing"
50242 + help
50243 + If you say Y here, the exec, chdir, and (un)mount logging features
50244 + will only operate on a group you specify. This option is recommended
50245 + if you only want to watch certain users instead of having a large
50246 + amount of logs from the entire system. If the sysctl option is enabled,
50247 + a sysctl option with name "audit_group" is created.
50248 +
50249 +config GRKERNSEC_AUDIT_GID
50250 + int "GID for auditing"
50251 + depends on GRKERNSEC_AUDIT_GROUP
50252 + default 1007
50253 +
50254 +config GRKERNSEC_EXECLOG
50255 + bool "Exec logging"
50256 + help
50257 + If you say Y here, all execve() calls will be logged (since the
50258 + other exec*() calls are frontends to execve(), all execution
50259 + will be logged). Useful for shell-servers that like to keep track
50260 + of their users. If the sysctl option is enabled, a sysctl option with
50261 + name "exec_logging" is created.
50262 + WARNING: This option when enabled will produce a LOT of logs, especially
50263 + on an active system.
50264 +
50265 +config GRKERNSEC_RESLOG
50266 + bool "Resource logging"
50267 + help
50268 + If you say Y here, all attempts to overstep resource limits will
50269 + be logged with the resource name, the requested size, and the current
50270 + limit. It is highly recommended that you say Y here. If the sysctl
50271 + option is enabled, a sysctl option with name "resource_logging" is
50272 + created. If the RBAC system is enabled, the sysctl value is ignored.
50273 +
50274 +config GRKERNSEC_CHROOT_EXECLOG
50275 + bool "Log execs within chroot"
50276 + help
50277 + If you say Y here, all executions inside a chroot jail will be logged
50278 + to syslog. This can cause a large amount of logs if certain
50279 + applications (eg. djb's daemontools) are installed on the system, and
50280 + is therefore left as an option. If the sysctl option is enabled, a
50281 + sysctl option with name "chroot_execlog" is created.
50282 +
50283 +config GRKERNSEC_AUDIT_PTRACE
50284 + bool "Ptrace logging"
50285 + help
50286 + If you say Y here, all attempts to attach to a process via ptrace
50287 + will be logged. If the sysctl option is enabled, a sysctl option
50288 + with name "audit_ptrace" is created.
50289 +
50290 +config GRKERNSEC_AUDIT_CHDIR
50291 + bool "Chdir logging"
50292 + help
50293 + If you say Y here, all chdir() calls will be logged. If the sysctl
50294 + option is enabled, a sysctl option with name "audit_chdir" is created.
50295 +
50296 +config GRKERNSEC_AUDIT_MOUNT
50297 + bool "(Un)Mount logging"
50298 + help
50299 + If you say Y here, all mounts and unmounts will be logged. If the
50300 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50301 + created.
50302 +
50303 +config GRKERNSEC_SIGNAL
50304 + bool "Signal logging"
50305 + help
50306 + If you say Y here, certain important signals will be logged, such as
50307 + SIGSEGV, which will as a result inform you of when an error in a program
50308 + occurred, which in some cases could mean a possible exploit attempt.
50309 + If the sysctl option is enabled, a sysctl option with name
50310 + "signal_logging" is created.
50311 +
50312 +config GRKERNSEC_FORKFAIL
50313 + bool "Fork failure logging"
50314 + help
50315 + If you say Y here, all failed fork() attempts will be logged.
50316 + This could suggest a fork bomb, or someone attempting to overstep
50317 + their process limit. If the sysctl option is enabled, a sysctl option
50318 + with name "forkfail_logging" is created.
50319 +
50320 +config GRKERNSEC_TIME
50321 + bool "Time change logging"
50322 + help
50323 + If you say Y here, any changes of the system clock will be logged.
50324 + If the sysctl option is enabled, a sysctl option with name
50325 + "timechange_logging" is created.
50326 +
50327 +config GRKERNSEC_PROC_IPADDR
50328 + bool "/proc/<pid>/ipaddr support"
50329 + help
50330 + If you say Y here, a new entry will be added to each /proc/<pid>
50331 + directory that contains the IP address of the person using the task.
50332 + The IP is carried across local TCP and AF_UNIX stream sockets.
50333 + This information can be useful for IDS/IPSes to perform remote response
50334 + to a local attack. The entry is readable by only the owner of the
50335 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50336 + the RBAC system), and thus does not create privacy concerns.
50337 +
50338 +config GRKERNSEC_RWXMAP_LOG
50339 + bool 'Denied RWX mmap/mprotect logging'
50340 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50341 + help
50342 + If you say Y here, calls to mmap() and mprotect() with explicit
50343 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50344 + denied by the PAX_MPROTECT feature. If the sysctl option is
50345 + enabled, a sysctl option with name "rwxmap_logging" is created.
50346 +
50347 +config GRKERNSEC_AUDIT_TEXTREL
50348 + bool 'ELF text relocations logging (READ HELP)'
50349 + depends on PAX_MPROTECT
50350 + help
50351 + If you say Y here, text relocations will be logged with the filename
50352 + of the offending library or binary. The purpose of the feature is
50353 + to help Linux distribution developers get rid of libraries and
50354 + binaries that need text relocations which hinder the future progress
50355 + of PaX. Only Linux distribution developers should say Y here, and
50356 + never on a production machine, as this option creates an information
50357 + leak that could aid an attacker in defeating the randomization of
50358 + a single memory region. If the sysctl option is enabled, a sysctl
50359 + option with name "audit_textrel" is created.
50360 +
50361 +endmenu
50362 +
50363 +menu "Executable Protections"
50364 +depends on GRKERNSEC
50365 +
50366 +config GRKERNSEC_EXECVE
50367 + bool "Enforce RLIMIT_NPROC on execs"
50368 + help
50369 + If you say Y here, users with a resource limit on processes will
50370 + have the value checked during execve() calls. The current system
50371 + only checks the system limit during fork() calls. If the sysctl option
50372 + is enabled, a sysctl option with name "execve_limiting" is created.
50373 +
50374 +config GRKERNSEC_DMESG
50375 + bool "Dmesg(8) restriction"
50376 + help
50377 + If you say Y here, non-root users will not be able to use dmesg(8)
50378 + to view up to the last 4kb of messages in the kernel's log buffer.
50379 + The kernel's log buffer often contains kernel addresses and other
50380 + identifying information useful to an attacker in fingerprinting a
50381 + system for a targeted exploit.
50382 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50383 + created.
50384 +
50385 +config GRKERNSEC_HARDEN_PTRACE
50386 + bool "Deter ptrace-based process snooping"
50387 + help
50388 + If you say Y here, TTY sniffers and other malicious monitoring
50389 + programs implemented through ptrace will be defeated. If you
50390 + have been using the RBAC system, this option has already been
50391 + enabled for several years for all users, with the ability to make
50392 + fine-grained exceptions.
50393 +
50394 + This option only affects the ability of non-root users to ptrace
50395 + processes that are not a descendent of the ptracing process.
50396 + This means that strace ./binary and gdb ./binary will still work,
50397 + but attaching to arbitrary processes will not. If the sysctl
50398 + option is enabled, a sysctl option with name "harden_ptrace" is
50399 + created.
50400 +
50401 +config GRKERNSEC_TPE
50402 + bool "Trusted Path Execution (TPE)"
50403 + help
50404 + If you say Y here, you will be able to choose a gid to add to the
50405 + supplementary groups of users you want to mark as "untrusted."
50406 + These users will not be able to execute any files that are not in
50407 + root-owned directories writable only by root. If the sysctl option
50408 + is enabled, a sysctl option with name "tpe" is created.
50409 +
50410 +config GRKERNSEC_TPE_ALL
50411 + bool "Partially restrict all non-root users"
50412 + depends on GRKERNSEC_TPE
50413 + help
50414 + If you say Y here, all non-root users will be covered under
50415 + a weaker TPE restriction. This is separate from, and in addition to,
50416 + the main TPE options that you have selected elsewhere. Thus, if a
50417 + "trusted" GID is chosen, this restriction applies to even that GID.
50418 + Under this restriction, all non-root users will only be allowed to
50419 + execute files in directories they own that are not group or
50420 + world-writable, or in directories owned by root and writable only by
50421 + root. If the sysctl option is enabled, a sysctl option with name
50422 + "tpe_restrict_all" is created.
50423 +
50424 +config GRKERNSEC_TPE_INVERT
50425 + bool "Invert GID option"
50426 + depends on GRKERNSEC_TPE
50427 + help
50428 + If you say Y here, the group you specify in the TPE configuration will
50429 + decide what group TPE restrictions will be *disabled* for. This
50430 + option is useful if you want TPE restrictions to be applied to most
50431 + users on the system. If the sysctl option is enabled, a sysctl option
50432 + with name "tpe_invert" is created. Unlike other sysctl options, this
50433 + entry will default to on for backward-compatibility.
50434 +
50435 +config GRKERNSEC_TPE_GID
50436 + int "GID for untrusted users"
50437 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50438 + default 1005
50439 + help
50440 + Setting this GID determines what group TPE restrictions will be
50441 + *enabled* for. If the sysctl option is enabled, a sysctl option
50442 + with name "tpe_gid" is created.
50443 +
50444 +config GRKERNSEC_TPE_GID
50445 + int "GID for trusted users"
50446 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50447 + default 1005
50448 + help
50449 + Setting this GID determines what group TPE restrictions will be
50450 + *disabled* for. If the sysctl option is enabled, a sysctl option
50451 + with name "tpe_gid" is created.
50452 +
50453 +endmenu
50454 +menu "Network Protections"
50455 +depends on GRKERNSEC
50456 +
50457 +config GRKERNSEC_RANDNET
50458 + bool "Larger entropy pools"
50459 + help
50460 + If you say Y here, the entropy pools used for many features of Linux
50461 + and grsecurity will be doubled in size. Since several grsecurity
50462 + features use additional randomness, it is recommended that you say Y
50463 + here. Saying Y here has a similar effect as modifying
50464 + /proc/sys/kernel/random/poolsize.
50465 +
50466 +config GRKERNSEC_BLACKHOLE
50467 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50468 + depends on NET
50469 + help
50470 + If you say Y here, neither TCP resets nor ICMP
50471 + destination-unreachable packets will be sent in response to packets
50472 + sent to ports for which no associated listening process exists.
50473 + This feature supports both IPV4 and IPV6 and exempts the
50474 + loopback interface from blackholing. Enabling this feature
50475 + makes a host more resilient to DoS attacks and reduces network
50476 + visibility against scanners.
50477 +
50478 + The blackhole feature as-implemented is equivalent to the FreeBSD
50479 + blackhole feature, as it prevents RST responses to all packets, not
50480 + just SYNs. Under most application behavior this causes no
50481 + problems, but applications (like haproxy) may not close certain
50482 + connections in a way that cleanly terminates them on the remote
50483 + end, leaving the remote host in LAST_ACK state. Because of this
50484 + side-effect and to prevent intentional LAST_ACK DoSes, this
50485 + feature also adds automatic mitigation against such attacks.
50486 + The mitigation drastically reduces the amount of time a socket
50487 + can spend in LAST_ACK state. If you're using haproxy and not
50488 + all servers it connects to have this option enabled, consider
50489 + disabling this feature on the haproxy host.
50490 +
50491 + If the sysctl option is enabled, two sysctl options with names
50492 + "ip_blackhole" and "lastack_retries" will be created.
50493 + While "ip_blackhole" takes the standard zero/non-zero on/off
50494 + toggle, "lastack_retries" uses the same kinds of values as
50495 + "tcp_retries1" and "tcp_retries2". The default value of 4
50496 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50497 + state.
50498 +
50499 +config GRKERNSEC_SOCKET
50500 + bool "Socket restrictions"
50501 + depends on NET
50502 + help
50503 + If you say Y here, you will be able to choose from several options.
50504 + If you assign a GID on your system and add it to the supplementary
50505 + groups of users you want to restrict socket access to, this patch
50506 + will perform up to three things, based on the option(s) you choose.
50507 +
50508 +config GRKERNSEC_SOCKET_ALL
50509 + bool "Deny any sockets to group"
50510 + depends on GRKERNSEC_SOCKET
50511 + help
50512 + If you say Y here, you will be able to choose a GID of whose users will
50513 + be unable to connect to other hosts from your machine or run server
50514 + applications from your machine. If the sysctl option is enabled, a
50515 + sysctl option with name "socket_all" is created.
50516 +
50517 +config GRKERNSEC_SOCKET_ALL_GID
50518 + int "GID to deny all sockets for"
50519 + depends on GRKERNSEC_SOCKET_ALL
50520 + default 1004
50521 + help
50522 + Here you can choose the GID to disable socket access for. Remember to
50523 + add the users you want socket access disabled for to the GID
50524 + specified here. If the sysctl option is enabled, a sysctl option
50525 + with name "socket_all_gid" is created.
50526 +
50527 +config GRKERNSEC_SOCKET_CLIENT
50528 + bool "Deny client sockets to group"
50529 + depends on GRKERNSEC_SOCKET
50530 + help
50531 + If you say Y here, you will be able to choose a GID of whose users will
50532 + be unable to connect to other hosts from your machine, but will be
50533 + able to run servers. If this option is enabled, all users in the group
50534 + you specify will have to use passive mode when initiating ftp transfers
50535 + from the shell on your machine. If the sysctl option is enabled, a
50536 + sysctl option with name "socket_client" is created.
50537 +
50538 +config GRKERNSEC_SOCKET_CLIENT_GID
50539 + int "GID to deny client sockets for"
50540 + depends on GRKERNSEC_SOCKET_CLIENT
50541 + default 1003
50542 + help
50543 + Here you can choose the GID to disable client socket access for.
50544 + Remember to add the users you want client socket access disabled for to
50545 + the GID specified here. If the sysctl option is enabled, a sysctl
50546 + option with name "socket_client_gid" is created.
50547 +
50548 +config GRKERNSEC_SOCKET_SERVER
50549 + bool "Deny server sockets to group"
50550 + depends on GRKERNSEC_SOCKET
50551 + help
50552 + If you say Y here, you will be able to choose a GID of whose users will
50553 + be unable to run server applications from your machine. If the sysctl
50554 + option is enabled, a sysctl option with name "socket_server" is created.
50555 +
50556 +config GRKERNSEC_SOCKET_SERVER_GID
50557 + int "GID to deny server sockets for"
50558 + depends on GRKERNSEC_SOCKET_SERVER
50559 + default 1002
50560 + help
50561 + Here you can choose the GID to disable server socket access for.
50562 + Remember to add the users you want server socket access disabled for to
50563 + the GID specified here. If the sysctl option is enabled, a sysctl
50564 + option with name "socket_server_gid" is created.
50565 +
50566 +endmenu
50567 +menu "Sysctl support"
50568 +depends on GRKERNSEC && SYSCTL
50569 +
50570 +config GRKERNSEC_SYSCTL
50571 + bool "Sysctl support"
50572 + help
50573 + If you say Y here, you will be able to change the options that
50574 + grsecurity runs with at bootup, without having to recompile your
50575 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50576 + to enable (1) or disable (0) various features. All the sysctl entries
50577 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50578 + All features enabled in the kernel configuration are disabled at boot
50579 + if you do not say Y to the "Turn on features by default" option.
50580 + All options should be set at startup, and the grsec_lock entry should
50581 + be set to a non-zero value after all the options are set.
50582 + *THIS IS EXTREMELY IMPORTANT*
50583 +
50584 +config GRKERNSEC_SYSCTL_DISTRO
50585 + bool "Extra sysctl support for distro makers (READ HELP)"
50586 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50587 + help
50588 + If you say Y here, additional sysctl options will be created
50589 + for features that affect processes running as root. Therefore,
50590 + it is critical when using this option that the grsec_lock entry be
50591 + enabled after boot. Only distros with prebuilt kernel packages
50592 + with this option enabled that can ensure grsec_lock is enabled
50593 + after boot should use this option.
50594 + *Failure to set grsec_lock after boot makes all grsec features
50595 + this option covers useless*
50596 +
50597 + Currently this option creates the following sysctl entries:
50598 + "Disable Privileged I/O": "disable_priv_io"
50599 +
50600 +config GRKERNSEC_SYSCTL_ON
50601 + bool "Turn on features by default"
50602 + depends on GRKERNSEC_SYSCTL
50603 + help
50604 + If you say Y here, instead of having all features enabled in the
50605 + kernel configuration disabled at boot time, the features will be
50606 + enabled at boot time. It is recommended you say Y here unless
50607 + there is some reason you would want all sysctl-tunable features to
50608 + be disabled by default. As mentioned elsewhere, it is important
50609 + to enable the grsec_lock entry once you have finished modifying
50610 + the sysctl entries.
50611 +
50612 +endmenu
50613 +menu "Logging Options"
50614 +depends on GRKERNSEC
50615 +
50616 +config GRKERNSEC_FLOODTIME
50617 + int "Seconds in between log messages (minimum)"
50618 + default 10
50619 + help
50620 + This option allows you to enforce the number of seconds between
50621 + grsecurity log messages. The default should be suitable for most
50622 + people, however, if you choose to change it, choose a value small enough
50623 + to allow informative logs to be produced, but large enough to
50624 + prevent flooding.
50625 +
50626 +config GRKERNSEC_FLOODBURST
50627 + int "Number of messages in a burst (maximum)"
50628 + default 4
50629 + help
50630 + This option allows you to choose the maximum number of messages allowed
50631 + within the flood time interval you chose in a separate option. The
50632 + default should be suitable for most people, however if you find that
50633 + many of your logs are being interpreted as flooding, you may want to
50634 + raise this value.
50635 +
50636 +endmenu
50637 +
50638 +endmenu
50639 diff -urNp linux-3.0.3/grsecurity/Makefile linux-3.0.3/grsecurity/Makefile
50640 --- linux-3.0.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50641 +++ linux-3.0.3/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50642 @@ -0,0 +1,34 @@
50643 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50644 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50645 +# into an RBAC system
50646 +#
50647 +# All code in this directory and various hooks inserted throughout the kernel
50648 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50649 +# under the GPL v2 or higher
50650 +
50651 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50652 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50653 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50654 +
50655 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50656 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50657 + gracl_learn.o grsec_log.o
50658 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50659 +
50660 +ifdef CONFIG_NET
50661 +obj-y += grsec_sock.o
50662 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50663 +endif
50664 +
50665 +ifndef CONFIG_GRKERNSEC
50666 +obj-y += grsec_disabled.o
50667 +endif
50668 +
50669 +ifdef CONFIG_GRKERNSEC_HIDESYM
50670 +extra-y := grsec_hidesym.o
50671 +$(obj)/grsec_hidesym.o:
50672 + @-chmod -f 500 /boot
50673 + @-chmod -f 500 /lib/modules
50674 + @-chmod -f 700 .
50675 + @echo ' grsec: protected kernel image paths'
50676 +endif
50677 diff -urNp linux-3.0.3/include/acpi/acpi_bus.h linux-3.0.3/include/acpi/acpi_bus.h
50678 --- linux-3.0.3/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50679 +++ linux-3.0.3/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50680 @@ -107,7 +107,7 @@ struct acpi_device_ops {
50681 acpi_op_bind bind;
50682 acpi_op_unbind unbind;
50683 acpi_op_notify notify;
50684 -};
50685 +} __no_const;
50686
50687 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50688
50689 diff -urNp linux-3.0.3/include/asm-generic/atomic-long.h linux-3.0.3/include/asm-generic/atomic-long.h
50690 --- linux-3.0.3/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50691 +++ linux-3.0.3/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50692 @@ -22,6 +22,12 @@
50693
50694 typedef atomic64_t atomic_long_t;
50695
50696 +#ifdef CONFIG_PAX_REFCOUNT
50697 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
50698 +#else
50699 +typedef atomic64_t atomic_long_unchecked_t;
50700 +#endif
50701 +
50702 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50703
50704 static inline long atomic_long_read(atomic_long_t *l)
50705 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50706 return (long)atomic64_read(v);
50707 }
50708
50709 +#ifdef CONFIG_PAX_REFCOUNT
50710 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50711 +{
50712 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50713 +
50714 + return (long)atomic64_read_unchecked(v);
50715 +}
50716 +#endif
50717 +
50718 static inline void atomic_long_set(atomic_long_t *l, long i)
50719 {
50720 atomic64_t *v = (atomic64_t *)l;
50721 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
50722 atomic64_set(v, i);
50723 }
50724
50725 +#ifdef CONFIG_PAX_REFCOUNT
50726 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50727 +{
50728 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50729 +
50730 + atomic64_set_unchecked(v, i);
50731 +}
50732 +#endif
50733 +
50734 static inline void atomic_long_inc(atomic_long_t *l)
50735 {
50736 atomic64_t *v = (atomic64_t *)l;
50737 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
50738 atomic64_inc(v);
50739 }
50740
50741 +#ifdef CONFIG_PAX_REFCOUNT
50742 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
50743 +{
50744 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50745 +
50746 + atomic64_inc_unchecked(v);
50747 +}
50748 +#endif
50749 +
50750 static inline void atomic_long_dec(atomic_long_t *l)
50751 {
50752 atomic64_t *v = (atomic64_t *)l;
50753 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
50754 atomic64_dec(v);
50755 }
50756
50757 +#ifdef CONFIG_PAX_REFCOUNT
50758 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
50759 +{
50760 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50761 +
50762 + atomic64_dec_unchecked(v);
50763 +}
50764 +#endif
50765 +
50766 static inline void atomic_long_add(long i, atomic_long_t *l)
50767 {
50768 atomic64_t *v = (atomic64_t *)l;
50769 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
50770 atomic64_add(i, v);
50771 }
50772
50773 +#ifdef CONFIG_PAX_REFCOUNT
50774 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
50775 +{
50776 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50777 +
50778 + atomic64_add_unchecked(i, v);
50779 +}
50780 +#endif
50781 +
50782 static inline void atomic_long_sub(long i, atomic_long_t *l)
50783 {
50784 atomic64_t *v = (atomic64_t *)l;
50785 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
50786 atomic64_sub(i, v);
50787 }
50788
50789 +#ifdef CONFIG_PAX_REFCOUNT
50790 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
50791 +{
50792 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50793 +
50794 + atomic64_sub_unchecked(i, v);
50795 +}
50796 +#endif
50797 +
50798 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
50799 {
50800 atomic64_t *v = (atomic64_t *)l;
50801 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
50802 return (long)atomic64_inc_return(v);
50803 }
50804
50805 +#ifdef CONFIG_PAX_REFCOUNT
50806 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
50807 +{
50808 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50809 +
50810 + return (long)atomic64_inc_return_unchecked(v);
50811 +}
50812 +#endif
50813 +
50814 static inline long atomic_long_dec_return(atomic_long_t *l)
50815 {
50816 atomic64_t *v = (atomic64_t *)l;
50817 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
50818
50819 typedef atomic_t atomic_long_t;
50820
50821 +#ifdef CONFIG_PAX_REFCOUNT
50822 +typedef atomic_unchecked_t atomic_long_unchecked_t;
50823 +#else
50824 +typedef atomic_t atomic_long_unchecked_t;
50825 +#endif
50826 +
50827 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
50828 static inline long atomic_long_read(atomic_long_t *l)
50829 {
50830 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
50831 return (long)atomic_read(v);
50832 }
50833
50834 +#ifdef CONFIG_PAX_REFCOUNT
50835 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50836 +{
50837 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50838 +
50839 + return (long)atomic_read_unchecked(v);
50840 +}
50841 +#endif
50842 +
50843 static inline void atomic_long_set(atomic_long_t *l, long i)
50844 {
50845 atomic_t *v = (atomic_t *)l;
50846 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
50847 atomic_set(v, i);
50848 }
50849
50850 +#ifdef CONFIG_PAX_REFCOUNT
50851 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50852 +{
50853 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50854 +
50855 + atomic_set_unchecked(v, i);
50856 +}
50857 +#endif
50858 +
50859 static inline void atomic_long_inc(atomic_long_t *l)
50860 {
50861 atomic_t *v = (atomic_t *)l;
50862 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
50863 atomic_inc(v);
50864 }
50865
50866 +#ifdef CONFIG_PAX_REFCOUNT
50867 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
50868 +{
50869 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50870 +
50871 + atomic_inc_unchecked(v);
50872 +}
50873 +#endif
50874 +
50875 static inline void atomic_long_dec(atomic_long_t *l)
50876 {
50877 atomic_t *v = (atomic_t *)l;
50878 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
50879 atomic_dec(v);
50880 }
50881
50882 +#ifdef CONFIG_PAX_REFCOUNT
50883 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
50884 +{
50885 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50886 +
50887 + atomic_dec_unchecked(v);
50888 +}
50889 +#endif
50890 +
50891 static inline void atomic_long_add(long i, atomic_long_t *l)
50892 {
50893 atomic_t *v = (atomic_t *)l;
50894 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
50895 atomic_add(i, v);
50896 }
50897
50898 +#ifdef CONFIG_PAX_REFCOUNT
50899 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
50900 +{
50901 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50902 +
50903 + atomic_add_unchecked(i, v);
50904 +}
50905 +#endif
50906 +
50907 static inline void atomic_long_sub(long i, atomic_long_t *l)
50908 {
50909 atomic_t *v = (atomic_t *)l;
50910 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
50911 atomic_sub(i, v);
50912 }
50913
50914 +#ifdef CONFIG_PAX_REFCOUNT
50915 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
50916 +{
50917 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50918 +
50919 + atomic_sub_unchecked(i, v);
50920 +}
50921 +#endif
50922 +
50923 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
50924 {
50925 atomic_t *v = (atomic_t *)l;
50926 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
50927 return (long)atomic_inc_return(v);
50928 }
50929
50930 +#ifdef CONFIG_PAX_REFCOUNT
50931 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
50932 +{
50933 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
50934 +
50935 + return (long)atomic_inc_return_unchecked(v);
50936 +}
50937 +#endif
50938 +
50939 static inline long atomic_long_dec_return(atomic_long_t *l)
50940 {
50941 atomic_t *v = (atomic_t *)l;
50942 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
50943
50944 #endif /* BITS_PER_LONG == 64 */
50945
50946 +#ifdef CONFIG_PAX_REFCOUNT
50947 +static inline void pax_refcount_needs_these_functions(void)
50948 +{
50949 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
50950 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
50951 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
50952 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
50953 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
50954 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
50955 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
50956 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
50957 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
50958 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
50959 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
50960 +
50961 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
50962 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
50963 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
50964 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
50965 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
50966 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
50967 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
50968 +}
50969 +#else
50970 +#define atomic_read_unchecked(v) atomic_read(v)
50971 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
50972 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
50973 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
50974 +#define atomic_inc_unchecked(v) atomic_inc(v)
50975 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
50976 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
50977 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
50978 +#define atomic_dec_unchecked(v) atomic_dec(v)
50979 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
50980 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
50981 +
50982 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
50983 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
50984 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
50985 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
50986 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
50987 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
50988 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
50989 +#endif
50990 +
50991 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
50992 diff -urNp linux-3.0.3/include/asm-generic/cache.h linux-3.0.3/include/asm-generic/cache.h
50993 --- linux-3.0.3/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
50994 +++ linux-3.0.3/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
50995 @@ -6,7 +6,7 @@
50996 * cache lines need to provide their own cache.h.
50997 */
50998
50999 -#define L1_CACHE_SHIFT 5
51000 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51001 +#define L1_CACHE_SHIFT 5UL
51002 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51003
51004 #endif /* __ASM_GENERIC_CACHE_H */
51005 diff -urNp linux-3.0.3/include/asm-generic/int-l64.h linux-3.0.3/include/asm-generic/int-l64.h
51006 --- linux-3.0.3/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51007 +++ linux-3.0.3/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51008 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51009 typedef signed long s64;
51010 typedef unsigned long u64;
51011
51012 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51013 +
51014 #define S8_C(x) x
51015 #define U8_C(x) x ## U
51016 #define S16_C(x) x
51017 diff -urNp linux-3.0.3/include/asm-generic/int-ll64.h linux-3.0.3/include/asm-generic/int-ll64.h
51018 --- linux-3.0.3/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51019 +++ linux-3.0.3/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51020 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51021 typedef signed long long s64;
51022 typedef unsigned long long u64;
51023
51024 +typedef unsigned long long intoverflow_t;
51025 +
51026 #define S8_C(x) x
51027 #define U8_C(x) x ## U
51028 #define S16_C(x) x
51029 diff -urNp linux-3.0.3/include/asm-generic/kmap_types.h linux-3.0.3/include/asm-generic/kmap_types.h
51030 --- linux-3.0.3/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51031 +++ linux-3.0.3/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51032 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51033 KMAP_D(17) KM_NMI,
51034 KMAP_D(18) KM_NMI_PTE,
51035 KMAP_D(19) KM_KDB,
51036 +KMAP_D(20) KM_CLEARPAGE,
51037 /*
51038 * Remember to update debug_kmap_atomic() when adding new kmap types!
51039 */
51040 -KMAP_D(20) KM_TYPE_NR
51041 +KMAP_D(21) KM_TYPE_NR
51042 };
51043
51044 #undef KMAP_D
51045 diff -urNp linux-3.0.3/include/asm-generic/pgtable.h linux-3.0.3/include/asm-generic/pgtable.h
51046 --- linux-3.0.3/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51047 +++ linux-3.0.3/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51048 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51049 #endif /* __HAVE_ARCH_PMD_WRITE */
51050 #endif
51051
51052 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51053 +static inline unsigned long pax_open_kernel(void) { return 0; }
51054 +#endif
51055 +
51056 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51057 +static inline unsigned long pax_close_kernel(void) { return 0; }
51058 +#endif
51059 +
51060 #endif /* !__ASSEMBLY__ */
51061
51062 #endif /* _ASM_GENERIC_PGTABLE_H */
51063 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopmd.h linux-3.0.3/include/asm-generic/pgtable-nopmd.h
51064 --- linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51065 +++ linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51066 @@ -1,14 +1,19 @@
51067 #ifndef _PGTABLE_NOPMD_H
51068 #define _PGTABLE_NOPMD_H
51069
51070 -#ifndef __ASSEMBLY__
51071 -
51072 #include <asm-generic/pgtable-nopud.h>
51073
51074 -struct mm_struct;
51075 -
51076 #define __PAGETABLE_PMD_FOLDED
51077
51078 +#define PMD_SHIFT PUD_SHIFT
51079 +#define PTRS_PER_PMD 1
51080 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51081 +#define PMD_MASK (~(PMD_SIZE-1))
51082 +
51083 +#ifndef __ASSEMBLY__
51084 +
51085 +struct mm_struct;
51086 +
51087 /*
51088 * Having the pmd type consist of a pud gets the size right, and allows
51089 * us to conceptually access the pud entry that this pmd is folded into
51090 @@ -16,11 +21,6 @@ struct mm_struct;
51091 */
51092 typedef struct { pud_t pud; } pmd_t;
51093
51094 -#define PMD_SHIFT PUD_SHIFT
51095 -#define PTRS_PER_PMD 1
51096 -#define PMD_SIZE (1UL << PMD_SHIFT)
51097 -#define PMD_MASK (~(PMD_SIZE-1))
51098 -
51099 /*
51100 * The "pud_xxx()" functions here are trivial for a folded two-level
51101 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51102 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopud.h linux-3.0.3/include/asm-generic/pgtable-nopud.h
51103 --- linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51104 +++ linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51105 @@ -1,10 +1,15 @@
51106 #ifndef _PGTABLE_NOPUD_H
51107 #define _PGTABLE_NOPUD_H
51108
51109 -#ifndef __ASSEMBLY__
51110 -
51111 #define __PAGETABLE_PUD_FOLDED
51112
51113 +#define PUD_SHIFT PGDIR_SHIFT
51114 +#define PTRS_PER_PUD 1
51115 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51116 +#define PUD_MASK (~(PUD_SIZE-1))
51117 +
51118 +#ifndef __ASSEMBLY__
51119 +
51120 /*
51121 * Having the pud type consist of a pgd gets the size right, and allows
51122 * us to conceptually access the pgd entry that this pud is folded into
51123 @@ -12,11 +17,6 @@
51124 */
51125 typedef struct { pgd_t pgd; } pud_t;
51126
51127 -#define PUD_SHIFT PGDIR_SHIFT
51128 -#define PTRS_PER_PUD 1
51129 -#define PUD_SIZE (1UL << PUD_SHIFT)
51130 -#define PUD_MASK (~(PUD_SIZE-1))
51131 -
51132 /*
51133 * The "pgd_xxx()" functions here are trivial for a folded two-level
51134 * setup: the pud is never bad, and a pud always exists (as it's folded
51135 diff -urNp linux-3.0.3/include/asm-generic/vmlinux.lds.h linux-3.0.3/include/asm-generic/vmlinux.lds.h
51136 --- linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51137 +++ linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51138 @@ -217,6 +217,7 @@
51139 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51140 VMLINUX_SYMBOL(__start_rodata) = .; \
51141 *(.rodata) *(.rodata.*) \
51142 + *(.data..read_only) \
51143 *(__vermagic) /* Kernel version magic */ \
51144 . = ALIGN(8); \
51145 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51146 @@ -723,17 +724,18 @@
51147 * section in the linker script will go there too. @phdr should have
51148 * a leading colon.
51149 *
51150 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51151 + * Note that this macros defines per_cpu_load as an absolute symbol.
51152 * If there is no need to put the percpu section at a predetermined
51153 * address, use PERCPU_SECTION.
51154 */
51155 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51156 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51157 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51158 + per_cpu_load = .; \
51159 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51160 - LOAD_OFFSET) { \
51161 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51162 PERCPU_INPUT(cacheline) \
51163 } phdr \
51164 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51165 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51166
51167 /**
51168 * PERCPU_SECTION - define output section for percpu area, simple version
51169 diff -urNp linux-3.0.3/include/drm/drm_crtc_helper.h linux-3.0.3/include/drm/drm_crtc_helper.h
51170 --- linux-3.0.3/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51171 +++ linux-3.0.3/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51172 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51173
51174 /* disable crtc when not in use - more explicit than dpms off */
51175 void (*disable)(struct drm_crtc *crtc);
51176 -};
51177 +} __no_const;
51178
51179 struct drm_encoder_helper_funcs {
51180 void (*dpms)(struct drm_encoder *encoder, int mode);
51181 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51182 struct drm_connector *connector);
51183 /* disable encoder when not in use - more explicit than dpms off */
51184 void (*disable)(struct drm_encoder *encoder);
51185 -};
51186 +} __no_const;
51187
51188 struct drm_connector_helper_funcs {
51189 int (*get_modes)(struct drm_connector *connector);
51190 diff -urNp linux-3.0.3/include/drm/drmP.h linux-3.0.3/include/drm/drmP.h
51191 --- linux-3.0.3/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51192 +++ linux-3.0.3/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51193 @@ -73,6 +73,7 @@
51194 #include <linux/workqueue.h>
51195 #include <linux/poll.h>
51196 #include <asm/pgalloc.h>
51197 +#include <asm/local.h>
51198 #include "drm.h"
51199
51200 #include <linux/idr.h>
51201 @@ -1033,7 +1034,7 @@ struct drm_device {
51202
51203 /** \name Usage Counters */
51204 /*@{ */
51205 - int open_count; /**< Outstanding files open */
51206 + local_t open_count; /**< Outstanding files open */
51207 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51208 atomic_t vma_count; /**< Outstanding vma areas open */
51209 int buf_use; /**< Buffers in use -- cannot alloc */
51210 @@ -1044,7 +1045,7 @@ struct drm_device {
51211 /*@{ */
51212 unsigned long counters;
51213 enum drm_stat_type types[15];
51214 - atomic_t counts[15];
51215 + atomic_unchecked_t counts[15];
51216 /*@} */
51217
51218 struct list_head filelist;
51219 diff -urNp linux-3.0.3/include/drm/ttm/ttm_memory.h linux-3.0.3/include/drm/ttm/ttm_memory.h
51220 --- linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51221 +++ linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51222 @@ -47,7 +47,7 @@
51223
51224 struct ttm_mem_shrink {
51225 int (*do_shrink) (struct ttm_mem_shrink *);
51226 -};
51227 +} __no_const;
51228
51229 /**
51230 * struct ttm_mem_global - Global memory accounting structure.
51231 diff -urNp linux-3.0.3/include/linux/a.out.h linux-3.0.3/include/linux/a.out.h
51232 --- linux-3.0.3/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51233 +++ linux-3.0.3/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51234 @@ -39,6 +39,14 @@ enum machine_type {
51235 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51236 };
51237
51238 +/* Constants for the N_FLAGS field */
51239 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51240 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51241 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51242 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51243 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51244 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51245 +
51246 #if !defined (N_MAGIC)
51247 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51248 #endif
51249 diff -urNp linux-3.0.3/include/linux/atmdev.h linux-3.0.3/include/linux/atmdev.h
51250 --- linux-3.0.3/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51251 +++ linux-3.0.3/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51252 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51253 #endif
51254
51255 struct k_atm_aal_stats {
51256 -#define __HANDLE_ITEM(i) atomic_t i
51257 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51258 __AAL_STAT_ITEMS
51259 #undef __HANDLE_ITEM
51260 };
51261 diff -urNp linux-3.0.3/include/linux/binfmts.h linux-3.0.3/include/linux/binfmts.h
51262 --- linux-3.0.3/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51263 +++ linux-3.0.3/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51264 @@ -88,6 +88,7 @@ struct linux_binfmt {
51265 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51266 int (*load_shlib)(struct file *);
51267 int (*core_dump)(struct coredump_params *cprm);
51268 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51269 unsigned long min_coredump; /* minimal dump size */
51270 };
51271
51272 diff -urNp linux-3.0.3/include/linux/blkdev.h linux-3.0.3/include/linux/blkdev.h
51273 --- linux-3.0.3/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51274 +++ linux-3.0.3/include/linux/blkdev.h 2011-08-23 21:47:56.000000000 -0400
51275 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51276 int (*getgeo)(struct block_device *, struct hd_geometry *);
51277 /* this callback is with swap_lock and sometimes page table lock held */
51278 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51279 - struct module *owner;
51280 + struct module * const owner;
51281 };
51282
51283 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51284 diff -urNp linux-3.0.3/include/linux/blktrace_api.h linux-3.0.3/include/linux/blktrace_api.h
51285 --- linux-3.0.3/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51286 +++ linux-3.0.3/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51287 @@ -161,7 +161,7 @@ struct blk_trace {
51288 struct dentry *dir;
51289 struct dentry *dropped_file;
51290 struct dentry *msg_file;
51291 - atomic_t dropped;
51292 + atomic_unchecked_t dropped;
51293 };
51294
51295 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51296 diff -urNp linux-3.0.3/include/linux/byteorder/little_endian.h linux-3.0.3/include/linux/byteorder/little_endian.h
51297 --- linux-3.0.3/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51298 +++ linux-3.0.3/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51299 @@ -42,51 +42,51 @@
51300
51301 static inline __le64 __cpu_to_le64p(const __u64 *p)
51302 {
51303 - return (__force __le64)*p;
51304 + return (__force const __le64)*p;
51305 }
51306 static inline __u64 __le64_to_cpup(const __le64 *p)
51307 {
51308 - return (__force __u64)*p;
51309 + return (__force const __u64)*p;
51310 }
51311 static inline __le32 __cpu_to_le32p(const __u32 *p)
51312 {
51313 - return (__force __le32)*p;
51314 + return (__force const __le32)*p;
51315 }
51316 static inline __u32 __le32_to_cpup(const __le32 *p)
51317 {
51318 - return (__force __u32)*p;
51319 + return (__force const __u32)*p;
51320 }
51321 static inline __le16 __cpu_to_le16p(const __u16 *p)
51322 {
51323 - return (__force __le16)*p;
51324 + return (__force const __le16)*p;
51325 }
51326 static inline __u16 __le16_to_cpup(const __le16 *p)
51327 {
51328 - return (__force __u16)*p;
51329 + return (__force const __u16)*p;
51330 }
51331 static inline __be64 __cpu_to_be64p(const __u64 *p)
51332 {
51333 - return (__force __be64)__swab64p(p);
51334 + return (__force const __be64)__swab64p(p);
51335 }
51336 static inline __u64 __be64_to_cpup(const __be64 *p)
51337 {
51338 - return __swab64p((__u64 *)p);
51339 + return __swab64p((const __u64 *)p);
51340 }
51341 static inline __be32 __cpu_to_be32p(const __u32 *p)
51342 {
51343 - return (__force __be32)__swab32p(p);
51344 + return (__force const __be32)__swab32p(p);
51345 }
51346 static inline __u32 __be32_to_cpup(const __be32 *p)
51347 {
51348 - return __swab32p((__u32 *)p);
51349 + return __swab32p((const __u32 *)p);
51350 }
51351 static inline __be16 __cpu_to_be16p(const __u16 *p)
51352 {
51353 - return (__force __be16)__swab16p(p);
51354 + return (__force const __be16)__swab16p(p);
51355 }
51356 static inline __u16 __be16_to_cpup(const __be16 *p)
51357 {
51358 - return __swab16p((__u16 *)p);
51359 + return __swab16p((const __u16 *)p);
51360 }
51361 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51362 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51363 diff -urNp linux-3.0.3/include/linux/cache.h linux-3.0.3/include/linux/cache.h
51364 --- linux-3.0.3/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51365 +++ linux-3.0.3/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51366 @@ -16,6 +16,10 @@
51367 #define __read_mostly
51368 #endif
51369
51370 +#ifndef __read_only
51371 +#define __read_only __read_mostly
51372 +#endif
51373 +
51374 #ifndef ____cacheline_aligned
51375 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51376 #endif
51377 diff -urNp linux-3.0.3/include/linux/capability.h linux-3.0.3/include/linux/capability.h
51378 --- linux-3.0.3/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51379 +++ linux-3.0.3/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51380 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51381 extern bool ns_capable(struct user_namespace *ns, int cap);
51382 extern bool task_ns_capable(struct task_struct *t, int cap);
51383 extern bool nsown_capable(int cap);
51384 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51385 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51386 +extern bool capable_nolog(int cap);
51387
51388 /* audit system wants to get cap info from files as well */
51389 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51390 diff -urNp linux-3.0.3/include/linux/cleancache.h linux-3.0.3/include/linux/cleancache.h
51391 --- linux-3.0.3/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51392 +++ linux-3.0.3/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51393 @@ -31,7 +31,7 @@ struct cleancache_ops {
51394 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51395 void (*flush_inode)(int, struct cleancache_filekey);
51396 void (*flush_fs)(int);
51397 -};
51398 +} __no_const;
51399
51400 extern struct cleancache_ops
51401 cleancache_register_ops(struct cleancache_ops *ops);
51402 diff -urNp linux-3.0.3/include/linux/compiler-gcc4.h linux-3.0.3/include/linux/compiler-gcc4.h
51403 --- linux-3.0.3/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51404 +++ linux-3.0.3/include/linux/compiler-gcc4.h 2011-08-23 21:47:56.000000000 -0400
51405 @@ -31,6 +31,9 @@
51406
51407
51408 #if __GNUC_MINOR__ >= 5
51409 +
51410 +#define __no_const __attribute__((no_const))
51411 +
51412 /*
51413 * Mark a position in code as unreachable. This can be used to
51414 * suppress control flow warnings after asm blocks that transfer
51415 @@ -46,6 +49,11 @@
51416 #define __noclone __attribute__((__noclone__))
51417
51418 #endif
51419 +
51420 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51421 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51422 +#define __bos0(ptr) __bos((ptr), 0)
51423 +#define __bos1(ptr) __bos((ptr), 1)
51424 #endif
51425
51426 #if __GNUC_MINOR__ > 0
51427 diff -urNp linux-3.0.3/include/linux/compiler.h linux-3.0.3/include/linux/compiler.h
51428 --- linux-3.0.3/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51429 +++ linux-3.0.3/include/linux/compiler.h 2011-08-23 21:47:56.000000000 -0400
51430 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51431 # define __attribute_const__ /* unimplemented */
51432 #endif
51433
51434 +#ifndef __no_const
51435 +# define __no_const
51436 +#endif
51437 +
51438 /*
51439 * Tell gcc if a function is cold. The compiler will assume any path
51440 * directly leading to the call is unlikely.
51441 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51442 #define __cold
51443 #endif
51444
51445 +#ifndef __alloc_size
51446 +#define __alloc_size(...)
51447 +#endif
51448 +
51449 +#ifndef __bos
51450 +#define __bos(ptr, arg)
51451 +#endif
51452 +
51453 +#ifndef __bos0
51454 +#define __bos0(ptr)
51455 +#endif
51456 +
51457 +#ifndef __bos1
51458 +#define __bos1(ptr)
51459 +#endif
51460 +
51461 /* Simple shorthand for a section definition */
51462 #ifndef __section
51463 # define __section(S) __attribute__ ((__section__(#S)))
51464 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51465 * use is to mediate communication between process-level code and irq/NMI
51466 * handlers, all running on the same CPU.
51467 */
51468 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51469 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51470 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51471
51472 #endif /* __LINUX_COMPILER_H */
51473 diff -urNp linux-3.0.3/include/linux/cpuset.h linux-3.0.3/include/linux/cpuset.h
51474 --- linux-3.0.3/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51475 +++ linux-3.0.3/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51476 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51477 * nodemask.
51478 */
51479 smp_mb();
51480 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51481 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51482 }
51483
51484 static inline void set_mems_allowed(nodemask_t nodemask)
51485 diff -urNp linux-3.0.3/include/linux/crypto.h linux-3.0.3/include/linux/crypto.h
51486 --- linux-3.0.3/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51487 +++ linux-3.0.3/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51488 @@ -361,7 +361,7 @@ struct cipher_tfm {
51489 const u8 *key, unsigned int keylen);
51490 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51491 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51492 -};
51493 +} __no_const;
51494
51495 struct hash_tfm {
51496 int (*init)(struct hash_desc *desc);
51497 @@ -382,13 +382,13 @@ struct compress_tfm {
51498 int (*cot_decompress)(struct crypto_tfm *tfm,
51499 const u8 *src, unsigned int slen,
51500 u8 *dst, unsigned int *dlen);
51501 -};
51502 +} __no_const;
51503
51504 struct rng_tfm {
51505 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51506 unsigned int dlen);
51507 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51508 -};
51509 +} __no_const;
51510
51511 #define crt_ablkcipher crt_u.ablkcipher
51512 #define crt_aead crt_u.aead
51513 diff -urNp linux-3.0.3/include/linux/decompress/mm.h linux-3.0.3/include/linux/decompress/mm.h
51514 --- linux-3.0.3/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51515 +++ linux-3.0.3/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51516 @@ -77,7 +77,7 @@ static void free(void *where)
51517 * warnings when not needed (indeed large_malloc / large_free are not
51518 * needed by inflate */
51519
51520 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51521 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51522 #define free(a) kfree(a)
51523
51524 #define large_malloc(a) vmalloc(a)
51525 diff -urNp linux-3.0.3/include/linux/dma-mapping.h linux-3.0.3/include/linux/dma-mapping.h
51526 --- linux-3.0.3/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51527 +++ linux-3.0.3/include/linux/dma-mapping.h 2011-08-23 21:47:56.000000000 -0400
51528 @@ -49,7 +49,7 @@ struct dma_map_ops {
51529 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51530 int (*dma_supported)(struct device *dev, u64 mask);
51531 int (*set_dma_mask)(struct device *dev, u64 mask);
51532 - int is_phys;
51533 + const int is_phys;
51534 };
51535
51536 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51537 diff -urNp linux-3.0.3/include/linux/efi.h linux-3.0.3/include/linux/efi.h
51538 --- linux-3.0.3/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51539 +++ linux-3.0.3/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51540 @@ -410,7 +410,7 @@ struct efivar_operations {
51541 efi_get_variable_t *get_variable;
51542 efi_get_next_variable_t *get_next_variable;
51543 efi_set_variable_t *set_variable;
51544 -};
51545 +} __no_const;
51546
51547 struct efivars {
51548 /*
51549 diff -urNp linux-3.0.3/include/linux/elf.h linux-3.0.3/include/linux/elf.h
51550 --- linux-3.0.3/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51551 +++ linux-3.0.3/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51552 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51553 #define PT_GNU_EH_FRAME 0x6474e550
51554
51555 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51556 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51557 +
51558 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51559 +
51560 +/* Constants for the e_flags field */
51561 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51562 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51563 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51564 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51565 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51566 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51567
51568 /*
51569 * Extended Numbering
51570 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51571 #define DT_DEBUG 21
51572 #define DT_TEXTREL 22
51573 #define DT_JMPREL 23
51574 +#define DT_FLAGS 30
51575 + #define DF_TEXTREL 0x00000004
51576 #define DT_ENCODING 32
51577 #define OLD_DT_LOOS 0x60000000
51578 #define DT_LOOS 0x6000000d
51579 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51580 #define PF_W 0x2
51581 #define PF_X 0x1
51582
51583 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51584 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51585 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51586 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51587 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51588 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51589 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51590 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51591 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51592 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51593 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51594 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51595 +
51596 typedef struct elf32_phdr{
51597 Elf32_Word p_type;
51598 Elf32_Off p_offset;
51599 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51600 #define EI_OSABI 7
51601 #define EI_PAD 8
51602
51603 +#define EI_PAX 14
51604 +
51605 #define ELFMAG0 0x7f /* EI_MAG */
51606 #define ELFMAG1 'E'
51607 #define ELFMAG2 'L'
51608 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51609 #define elf_note elf32_note
51610 #define elf_addr_t Elf32_Off
51611 #define Elf_Half Elf32_Half
51612 +#define elf_dyn Elf32_Dyn
51613
51614 #else
51615
51616 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51617 #define elf_note elf64_note
51618 #define elf_addr_t Elf64_Off
51619 #define Elf_Half Elf64_Half
51620 +#define elf_dyn Elf64_Dyn
51621
51622 #endif
51623
51624 diff -urNp linux-3.0.3/include/linux/firewire.h linux-3.0.3/include/linux/firewire.h
51625 --- linux-3.0.3/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51626 +++ linux-3.0.3/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51627 @@ -428,7 +428,7 @@ struct fw_iso_context {
51628 union {
51629 fw_iso_callback_t sc;
51630 fw_iso_mc_callback_t mc;
51631 - } callback;
51632 + } __no_const callback;
51633 void *callback_data;
51634 };
51635
51636 diff -urNp linux-3.0.3/include/linux/fscache-cache.h linux-3.0.3/include/linux/fscache-cache.h
51637 --- linux-3.0.3/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51638 +++ linux-3.0.3/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51639 @@ -102,7 +102,7 @@ struct fscache_operation {
51640 fscache_operation_release_t release;
51641 };
51642
51643 -extern atomic_t fscache_op_debug_id;
51644 +extern atomic_unchecked_t fscache_op_debug_id;
51645 extern void fscache_op_work_func(struct work_struct *work);
51646
51647 extern void fscache_enqueue_operation(struct fscache_operation *);
51648 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51649 {
51650 INIT_WORK(&op->work, fscache_op_work_func);
51651 atomic_set(&op->usage, 1);
51652 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51653 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51654 op->processor = processor;
51655 op->release = release;
51656 INIT_LIST_HEAD(&op->pend_link);
51657 diff -urNp linux-3.0.3/include/linux/fs.h linux-3.0.3/include/linux/fs.h
51658 --- linux-3.0.3/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51659 +++ linux-3.0.3/include/linux/fs.h 2011-08-23 21:48:14.000000000 -0400
51660 @@ -109,6 +109,11 @@ struct inodes_stat_t {
51661 /* File was opened by fanotify and shouldn't generate fanotify events */
51662 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51663
51664 +/* Hack for grsec so as not to require read permission simply to execute
51665 + * a binary
51666 + */
51667 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51668 +
51669 /*
51670 * The below are the various read and write types that we support. Some of
51671 * them include behavioral modifiers that send information down to the
51672 @@ -1544,7 +1549,7 @@ struct block_device_operations;
51673 * the big kernel lock held in all filesystems.
51674 */
51675 struct file_operations {
51676 - struct module *owner;
51677 + struct module * const owner;
51678 loff_t (*llseek) (struct file *, loff_t, int);
51679 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
51680 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
51681 @@ -1572,6 +1577,7 @@ struct file_operations {
51682 long (*fallocate)(struct file *file, int mode, loff_t offset,
51683 loff_t len);
51684 };
51685 +typedef struct file_operations __no_const file_operations_no_const;
51686
51687 #define IPERM_FLAG_RCU 0x0001
51688
51689 diff -urNp linux-3.0.3/include/linux/fs_struct.h linux-3.0.3/include/linux/fs_struct.h
51690 --- linux-3.0.3/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51691 +++ linux-3.0.3/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51692 @@ -6,7 +6,7 @@
51693 #include <linux/seqlock.h>
51694
51695 struct fs_struct {
51696 - int users;
51697 + atomic_t users;
51698 spinlock_t lock;
51699 seqcount_t seq;
51700 int umask;
51701 diff -urNp linux-3.0.3/include/linux/ftrace_event.h linux-3.0.3/include/linux/ftrace_event.h
51702 --- linux-3.0.3/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51703 +++ linux-3.0.3/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51704 @@ -96,7 +96,7 @@ struct trace_event_functions {
51705 trace_print_func raw;
51706 trace_print_func hex;
51707 trace_print_func binary;
51708 -};
51709 +} __no_const;
51710
51711 struct trace_event {
51712 struct hlist_node node;
51713 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
51714 extern int trace_add_event_call(struct ftrace_event_call *call);
51715 extern void trace_remove_event_call(struct ftrace_event_call *call);
51716
51717 -#define is_signed_type(type) (((type)(-1)) < 0)
51718 +#define is_signed_type(type) (((type)(-1)) < (type)1)
51719
51720 int trace_set_clr_event(const char *system, const char *event, int set);
51721
51722 diff -urNp linux-3.0.3/include/linux/genhd.h linux-3.0.3/include/linux/genhd.h
51723 --- linux-3.0.3/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
51724 +++ linux-3.0.3/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
51725 @@ -184,7 +184,7 @@ struct gendisk {
51726 struct kobject *slave_dir;
51727
51728 struct timer_rand_state *random;
51729 - atomic_t sync_io; /* RAID */
51730 + atomic_unchecked_t sync_io; /* RAID */
51731 struct disk_events *ev;
51732 #ifdef CONFIG_BLK_DEV_INTEGRITY
51733 struct blk_integrity *integrity;
51734 diff -urNp linux-3.0.3/include/linux/gracl.h linux-3.0.3/include/linux/gracl.h
51735 --- linux-3.0.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
51736 +++ linux-3.0.3/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
51737 @@ -0,0 +1,317 @@
51738 +#ifndef GR_ACL_H
51739 +#define GR_ACL_H
51740 +
51741 +#include <linux/grdefs.h>
51742 +#include <linux/resource.h>
51743 +#include <linux/capability.h>
51744 +#include <linux/dcache.h>
51745 +#include <asm/resource.h>
51746 +
51747 +/* Major status information */
51748 +
51749 +#define GR_VERSION "grsecurity 2.2.2"
51750 +#define GRSECURITY_VERSION 0x2202
51751 +
51752 +enum {
51753 + GR_SHUTDOWN = 0,
51754 + GR_ENABLE = 1,
51755 + GR_SPROLE = 2,
51756 + GR_RELOAD = 3,
51757 + GR_SEGVMOD = 4,
51758 + GR_STATUS = 5,
51759 + GR_UNSPROLE = 6,
51760 + GR_PASSSET = 7,
51761 + GR_SPROLEPAM = 8,
51762 +};
51763 +
51764 +/* Password setup definitions
51765 + * kernel/grhash.c */
51766 +enum {
51767 + GR_PW_LEN = 128,
51768 + GR_SALT_LEN = 16,
51769 + GR_SHA_LEN = 32,
51770 +};
51771 +
51772 +enum {
51773 + GR_SPROLE_LEN = 64,
51774 +};
51775 +
51776 +enum {
51777 + GR_NO_GLOB = 0,
51778 + GR_REG_GLOB,
51779 + GR_CREATE_GLOB
51780 +};
51781 +
51782 +#define GR_NLIMITS 32
51783 +
51784 +/* Begin Data Structures */
51785 +
51786 +struct sprole_pw {
51787 + unsigned char *rolename;
51788 + unsigned char salt[GR_SALT_LEN];
51789 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
51790 +};
51791 +
51792 +struct name_entry {
51793 + __u32 key;
51794 + ino_t inode;
51795 + dev_t device;
51796 + char *name;
51797 + __u16 len;
51798 + __u8 deleted;
51799 + struct name_entry *prev;
51800 + struct name_entry *next;
51801 +};
51802 +
51803 +struct inodev_entry {
51804 + struct name_entry *nentry;
51805 + struct inodev_entry *prev;
51806 + struct inodev_entry *next;
51807 +};
51808 +
51809 +struct acl_role_db {
51810 + struct acl_role_label **r_hash;
51811 + __u32 r_size;
51812 +};
51813 +
51814 +struct inodev_db {
51815 + struct inodev_entry **i_hash;
51816 + __u32 i_size;
51817 +};
51818 +
51819 +struct name_db {
51820 + struct name_entry **n_hash;
51821 + __u32 n_size;
51822 +};
51823 +
51824 +struct crash_uid {
51825 + uid_t uid;
51826 + unsigned long expires;
51827 +};
51828 +
51829 +struct gr_hash_struct {
51830 + void **table;
51831 + void **nametable;
51832 + void *first;
51833 + __u32 table_size;
51834 + __u32 used_size;
51835 + int type;
51836 +};
51837 +
51838 +/* Userspace Grsecurity ACL data structures */
51839 +
51840 +struct acl_subject_label {
51841 + char *filename;
51842 + ino_t inode;
51843 + dev_t device;
51844 + __u32 mode;
51845 + kernel_cap_t cap_mask;
51846 + kernel_cap_t cap_lower;
51847 + kernel_cap_t cap_invert_audit;
51848 +
51849 + struct rlimit res[GR_NLIMITS];
51850 + __u32 resmask;
51851 +
51852 + __u8 user_trans_type;
51853 + __u8 group_trans_type;
51854 + uid_t *user_transitions;
51855 + gid_t *group_transitions;
51856 + __u16 user_trans_num;
51857 + __u16 group_trans_num;
51858 +
51859 + __u32 sock_families[2];
51860 + __u32 ip_proto[8];
51861 + __u32 ip_type;
51862 + struct acl_ip_label **ips;
51863 + __u32 ip_num;
51864 + __u32 inaddr_any_override;
51865 +
51866 + __u32 crashes;
51867 + unsigned long expires;
51868 +
51869 + struct acl_subject_label *parent_subject;
51870 + struct gr_hash_struct *hash;
51871 + struct acl_subject_label *prev;
51872 + struct acl_subject_label *next;
51873 +
51874 + struct acl_object_label **obj_hash;
51875 + __u32 obj_hash_size;
51876 + __u16 pax_flags;
51877 +};
51878 +
51879 +struct role_allowed_ip {
51880 + __u32 addr;
51881 + __u32 netmask;
51882 +
51883 + struct role_allowed_ip *prev;
51884 + struct role_allowed_ip *next;
51885 +};
51886 +
51887 +struct role_transition {
51888 + char *rolename;
51889 +
51890 + struct role_transition *prev;
51891 + struct role_transition *next;
51892 +};
51893 +
51894 +struct acl_role_label {
51895 + char *rolename;
51896 + uid_t uidgid;
51897 + __u16 roletype;
51898 +
51899 + __u16 auth_attempts;
51900 + unsigned long expires;
51901 +
51902 + struct acl_subject_label *root_label;
51903 + struct gr_hash_struct *hash;
51904 +
51905 + struct acl_role_label *prev;
51906 + struct acl_role_label *next;
51907 +
51908 + struct role_transition *transitions;
51909 + struct role_allowed_ip *allowed_ips;
51910 + uid_t *domain_children;
51911 + __u16 domain_child_num;
51912 +
51913 + struct acl_subject_label **subj_hash;
51914 + __u32 subj_hash_size;
51915 +};
51916 +
51917 +struct user_acl_role_db {
51918 + struct acl_role_label **r_table;
51919 + __u32 num_pointers; /* Number of allocations to track */
51920 + __u32 num_roles; /* Number of roles */
51921 + __u32 num_domain_children; /* Number of domain children */
51922 + __u32 num_subjects; /* Number of subjects */
51923 + __u32 num_objects; /* Number of objects */
51924 +};
51925 +
51926 +struct acl_object_label {
51927 + char *filename;
51928 + ino_t inode;
51929 + dev_t device;
51930 + __u32 mode;
51931 +
51932 + struct acl_subject_label *nested;
51933 + struct acl_object_label *globbed;
51934 +
51935 + /* next two structures not used */
51936 +
51937 + struct acl_object_label *prev;
51938 + struct acl_object_label *next;
51939 +};
51940 +
51941 +struct acl_ip_label {
51942 + char *iface;
51943 + __u32 addr;
51944 + __u32 netmask;
51945 + __u16 low, high;
51946 + __u8 mode;
51947 + __u32 type;
51948 + __u32 proto[8];
51949 +
51950 + /* next two structures not used */
51951 +
51952 + struct acl_ip_label *prev;
51953 + struct acl_ip_label *next;
51954 +};
51955 +
51956 +struct gr_arg {
51957 + struct user_acl_role_db role_db;
51958 + unsigned char pw[GR_PW_LEN];
51959 + unsigned char salt[GR_SALT_LEN];
51960 + unsigned char sum[GR_SHA_LEN];
51961 + unsigned char sp_role[GR_SPROLE_LEN];
51962 + struct sprole_pw *sprole_pws;
51963 + dev_t segv_device;
51964 + ino_t segv_inode;
51965 + uid_t segv_uid;
51966 + __u16 num_sprole_pws;
51967 + __u16 mode;
51968 +};
51969 +
51970 +struct gr_arg_wrapper {
51971 + struct gr_arg *arg;
51972 + __u32 version;
51973 + __u32 size;
51974 +};
51975 +
51976 +struct subject_map {
51977 + struct acl_subject_label *user;
51978 + struct acl_subject_label *kernel;
51979 + struct subject_map *prev;
51980 + struct subject_map *next;
51981 +};
51982 +
51983 +struct acl_subj_map_db {
51984 + struct subject_map **s_hash;
51985 + __u32 s_size;
51986 +};
51987 +
51988 +/* End Data Structures Section */
51989 +
51990 +/* Hash functions generated by empirical testing by Brad Spengler
51991 + Makes good use of the low bits of the inode. Generally 0-1 times
51992 + in loop for successful match. 0-3 for unsuccessful match.
51993 + Shift/add algorithm with modulus of table size and an XOR*/
51994 +
51995 +static __inline__ unsigned int
51996 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
51997 +{
51998 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
51999 +}
52000 +
52001 + static __inline__ unsigned int
52002 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52003 +{
52004 + return ((const unsigned long)userp % sz);
52005 +}
52006 +
52007 +static __inline__ unsigned int
52008 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52009 +{
52010 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52011 +}
52012 +
52013 +static __inline__ unsigned int
52014 +nhash(const char *name, const __u16 len, const unsigned int sz)
52015 +{
52016 + return full_name_hash((const unsigned char *)name, len) % sz;
52017 +}
52018 +
52019 +#define FOR_EACH_ROLE_START(role) \
52020 + role = role_list; \
52021 + while (role) {
52022 +
52023 +#define FOR_EACH_ROLE_END(role) \
52024 + role = role->prev; \
52025 + }
52026 +
52027 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52028 + subj = NULL; \
52029 + iter = 0; \
52030 + while (iter < role->subj_hash_size) { \
52031 + if (subj == NULL) \
52032 + subj = role->subj_hash[iter]; \
52033 + if (subj == NULL) { \
52034 + iter++; \
52035 + continue; \
52036 + }
52037 +
52038 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52039 + subj = subj->next; \
52040 + if (subj == NULL) \
52041 + iter++; \
52042 + }
52043 +
52044 +
52045 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52046 + subj = role->hash->first; \
52047 + while (subj != NULL) {
52048 +
52049 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52050 + subj = subj->next; \
52051 + }
52052 +
52053 +#endif
52054 +
52055 diff -urNp linux-3.0.3/include/linux/gralloc.h linux-3.0.3/include/linux/gralloc.h
52056 --- linux-3.0.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52057 +++ linux-3.0.3/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52058 @@ -0,0 +1,9 @@
52059 +#ifndef __GRALLOC_H
52060 +#define __GRALLOC_H
52061 +
52062 +void acl_free_all(void);
52063 +int acl_alloc_stack_init(unsigned long size);
52064 +void *acl_alloc(unsigned long len);
52065 +void *acl_alloc_num(unsigned long num, unsigned long len);
52066 +
52067 +#endif
52068 diff -urNp linux-3.0.3/include/linux/grdefs.h linux-3.0.3/include/linux/grdefs.h
52069 --- linux-3.0.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52070 +++ linux-3.0.3/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52071 @@ -0,0 +1,140 @@
52072 +#ifndef GRDEFS_H
52073 +#define GRDEFS_H
52074 +
52075 +/* Begin grsecurity status declarations */
52076 +
52077 +enum {
52078 + GR_READY = 0x01,
52079 + GR_STATUS_INIT = 0x00 // disabled state
52080 +};
52081 +
52082 +/* Begin ACL declarations */
52083 +
52084 +/* Role flags */
52085 +
52086 +enum {
52087 + GR_ROLE_USER = 0x0001,
52088 + GR_ROLE_GROUP = 0x0002,
52089 + GR_ROLE_DEFAULT = 0x0004,
52090 + GR_ROLE_SPECIAL = 0x0008,
52091 + GR_ROLE_AUTH = 0x0010,
52092 + GR_ROLE_NOPW = 0x0020,
52093 + GR_ROLE_GOD = 0x0040,
52094 + GR_ROLE_LEARN = 0x0080,
52095 + GR_ROLE_TPE = 0x0100,
52096 + GR_ROLE_DOMAIN = 0x0200,
52097 + GR_ROLE_PAM = 0x0400,
52098 + GR_ROLE_PERSIST = 0x0800
52099 +};
52100 +
52101 +/* ACL Subject and Object mode flags */
52102 +enum {
52103 + GR_DELETED = 0x80000000
52104 +};
52105 +
52106 +/* ACL Object-only mode flags */
52107 +enum {
52108 + GR_READ = 0x00000001,
52109 + GR_APPEND = 0x00000002,
52110 + GR_WRITE = 0x00000004,
52111 + GR_EXEC = 0x00000008,
52112 + GR_FIND = 0x00000010,
52113 + GR_INHERIT = 0x00000020,
52114 + GR_SETID = 0x00000040,
52115 + GR_CREATE = 0x00000080,
52116 + GR_DELETE = 0x00000100,
52117 + GR_LINK = 0x00000200,
52118 + GR_AUDIT_READ = 0x00000400,
52119 + GR_AUDIT_APPEND = 0x00000800,
52120 + GR_AUDIT_WRITE = 0x00001000,
52121 + GR_AUDIT_EXEC = 0x00002000,
52122 + GR_AUDIT_FIND = 0x00004000,
52123 + GR_AUDIT_INHERIT= 0x00008000,
52124 + GR_AUDIT_SETID = 0x00010000,
52125 + GR_AUDIT_CREATE = 0x00020000,
52126 + GR_AUDIT_DELETE = 0x00040000,
52127 + GR_AUDIT_LINK = 0x00080000,
52128 + GR_PTRACERD = 0x00100000,
52129 + GR_NOPTRACE = 0x00200000,
52130 + GR_SUPPRESS = 0x00400000,
52131 + GR_NOLEARN = 0x00800000,
52132 + GR_INIT_TRANSFER= 0x01000000
52133 +};
52134 +
52135 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52136 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52137 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52138 +
52139 +/* ACL subject-only mode flags */
52140 +enum {
52141 + GR_KILL = 0x00000001,
52142 + GR_VIEW = 0x00000002,
52143 + GR_PROTECTED = 0x00000004,
52144 + GR_LEARN = 0x00000008,
52145 + GR_OVERRIDE = 0x00000010,
52146 + /* just a placeholder, this mode is only used in userspace */
52147 + GR_DUMMY = 0x00000020,
52148 + GR_PROTSHM = 0x00000040,
52149 + GR_KILLPROC = 0x00000080,
52150 + GR_KILLIPPROC = 0x00000100,
52151 + /* just a placeholder, this mode is only used in userspace */
52152 + GR_NOTROJAN = 0x00000200,
52153 + GR_PROTPROCFD = 0x00000400,
52154 + GR_PROCACCT = 0x00000800,
52155 + GR_RELAXPTRACE = 0x00001000,
52156 + GR_NESTED = 0x00002000,
52157 + GR_INHERITLEARN = 0x00004000,
52158 + GR_PROCFIND = 0x00008000,
52159 + GR_POVERRIDE = 0x00010000,
52160 + GR_KERNELAUTH = 0x00020000,
52161 + GR_ATSECURE = 0x00040000,
52162 + GR_SHMEXEC = 0x00080000
52163 +};
52164 +
52165 +enum {
52166 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52167 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52168 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52169 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52170 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52171 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52172 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52173 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52174 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52175 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52176 +};
52177 +
52178 +enum {
52179 + GR_ID_USER = 0x01,
52180 + GR_ID_GROUP = 0x02,
52181 +};
52182 +
52183 +enum {
52184 + GR_ID_ALLOW = 0x01,
52185 + GR_ID_DENY = 0x02,
52186 +};
52187 +
52188 +#define GR_CRASH_RES 31
52189 +#define GR_UIDTABLE_MAX 500
52190 +
52191 +/* begin resource learning section */
52192 +enum {
52193 + GR_RLIM_CPU_BUMP = 60,
52194 + GR_RLIM_FSIZE_BUMP = 50000,
52195 + GR_RLIM_DATA_BUMP = 10000,
52196 + GR_RLIM_STACK_BUMP = 1000,
52197 + GR_RLIM_CORE_BUMP = 10000,
52198 + GR_RLIM_RSS_BUMP = 500000,
52199 + GR_RLIM_NPROC_BUMP = 1,
52200 + GR_RLIM_NOFILE_BUMP = 5,
52201 + GR_RLIM_MEMLOCK_BUMP = 50000,
52202 + GR_RLIM_AS_BUMP = 500000,
52203 + GR_RLIM_LOCKS_BUMP = 2,
52204 + GR_RLIM_SIGPENDING_BUMP = 5,
52205 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52206 + GR_RLIM_NICE_BUMP = 1,
52207 + GR_RLIM_RTPRIO_BUMP = 1,
52208 + GR_RLIM_RTTIME_BUMP = 1000000
52209 +};
52210 +
52211 +#endif
52212 diff -urNp linux-3.0.3/include/linux/grinternal.h linux-3.0.3/include/linux/grinternal.h
52213 --- linux-3.0.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52214 +++ linux-3.0.3/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52215 @@ -0,0 +1,219 @@
52216 +#ifndef __GRINTERNAL_H
52217 +#define __GRINTERNAL_H
52218 +
52219 +#ifdef CONFIG_GRKERNSEC
52220 +
52221 +#include <linux/fs.h>
52222 +#include <linux/mnt_namespace.h>
52223 +#include <linux/nsproxy.h>
52224 +#include <linux/gracl.h>
52225 +#include <linux/grdefs.h>
52226 +#include <linux/grmsg.h>
52227 +
52228 +void gr_add_learn_entry(const char *fmt, ...)
52229 + __attribute__ ((format (printf, 1, 2)));
52230 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52231 + const struct vfsmount *mnt);
52232 +__u32 gr_check_create(const struct dentry *new_dentry,
52233 + const struct dentry *parent,
52234 + const struct vfsmount *mnt, const __u32 mode);
52235 +int gr_check_protected_task(const struct task_struct *task);
52236 +__u32 to_gr_audit(const __u32 reqmode);
52237 +int gr_set_acls(const int type);
52238 +int gr_apply_subject_to_task(struct task_struct *task);
52239 +int gr_acl_is_enabled(void);
52240 +char gr_roletype_to_char(void);
52241 +
52242 +void gr_handle_alertkill(struct task_struct *task);
52243 +char *gr_to_filename(const struct dentry *dentry,
52244 + const struct vfsmount *mnt);
52245 +char *gr_to_filename1(const struct dentry *dentry,
52246 + const struct vfsmount *mnt);
52247 +char *gr_to_filename2(const struct dentry *dentry,
52248 + const struct vfsmount *mnt);
52249 +char *gr_to_filename3(const struct dentry *dentry,
52250 + const struct vfsmount *mnt);
52251 +
52252 +extern int grsec_enable_harden_ptrace;
52253 +extern int grsec_enable_link;
52254 +extern int grsec_enable_fifo;
52255 +extern int grsec_enable_execve;
52256 +extern int grsec_enable_shm;
52257 +extern int grsec_enable_execlog;
52258 +extern int grsec_enable_signal;
52259 +extern int grsec_enable_audit_ptrace;
52260 +extern int grsec_enable_forkfail;
52261 +extern int grsec_enable_time;
52262 +extern int grsec_enable_rofs;
52263 +extern int grsec_enable_chroot_shmat;
52264 +extern int grsec_enable_chroot_mount;
52265 +extern int grsec_enable_chroot_double;
52266 +extern int grsec_enable_chroot_pivot;
52267 +extern int grsec_enable_chroot_chdir;
52268 +extern int grsec_enable_chroot_chmod;
52269 +extern int grsec_enable_chroot_mknod;
52270 +extern int grsec_enable_chroot_fchdir;
52271 +extern int grsec_enable_chroot_nice;
52272 +extern int grsec_enable_chroot_execlog;
52273 +extern int grsec_enable_chroot_caps;
52274 +extern int grsec_enable_chroot_sysctl;
52275 +extern int grsec_enable_chroot_unix;
52276 +extern int grsec_enable_tpe;
52277 +extern int grsec_tpe_gid;
52278 +extern int grsec_enable_tpe_all;
52279 +extern int grsec_enable_tpe_invert;
52280 +extern int grsec_enable_socket_all;
52281 +extern int grsec_socket_all_gid;
52282 +extern int grsec_enable_socket_client;
52283 +extern int grsec_socket_client_gid;
52284 +extern int grsec_enable_socket_server;
52285 +extern int grsec_socket_server_gid;
52286 +extern int grsec_audit_gid;
52287 +extern int grsec_enable_group;
52288 +extern int grsec_enable_audit_textrel;
52289 +extern int grsec_enable_log_rwxmaps;
52290 +extern int grsec_enable_mount;
52291 +extern int grsec_enable_chdir;
52292 +extern int grsec_resource_logging;
52293 +extern int grsec_enable_blackhole;
52294 +extern int grsec_lastack_retries;
52295 +extern int grsec_enable_brute;
52296 +extern int grsec_lock;
52297 +
52298 +extern spinlock_t grsec_alert_lock;
52299 +extern unsigned long grsec_alert_wtime;
52300 +extern unsigned long grsec_alert_fyet;
52301 +
52302 +extern spinlock_t grsec_audit_lock;
52303 +
52304 +extern rwlock_t grsec_exec_file_lock;
52305 +
52306 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52307 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52308 + (tsk)->exec_file->f_vfsmnt) : "/")
52309 +
52310 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52311 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52312 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52313 +
52314 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52315 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52316 + (tsk)->exec_file->f_vfsmnt) : "/")
52317 +
52318 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52319 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52320 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52321 +
52322 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52323 +
52324 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52325 +
52326 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52327 + (task)->pid, (cred)->uid, \
52328 + (cred)->euid, (cred)->gid, (cred)->egid, \
52329 + gr_parent_task_fullpath(task), \
52330 + (task)->real_parent->comm, (task)->real_parent->pid, \
52331 + (pcred)->uid, (pcred)->euid, \
52332 + (pcred)->gid, (pcred)->egid
52333 +
52334 +#define GR_CHROOT_CAPS {{ \
52335 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52336 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52337 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52338 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52339 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52340 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52341 +
52342 +#define security_learn(normal_msg,args...) \
52343 +({ \
52344 + read_lock(&grsec_exec_file_lock); \
52345 + gr_add_learn_entry(normal_msg "\n", ## args); \
52346 + read_unlock(&grsec_exec_file_lock); \
52347 +})
52348 +
52349 +enum {
52350 + GR_DO_AUDIT,
52351 + GR_DONT_AUDIT,
52352 + /* used for non-audit messages that we shouldn't kill the task on */
52353 + GR_DONT_AUDIT_GOOD
52354 +};
52355 +
52356 +enum {
52357 + GR_TTYSNIFF,
52358 + GR_RBAC,
52359 + GR_RBAC_STR,
52360 + GR_STR_RBAC,
52361 + GR_RBAC_MODE2,
52362 + GR_RBAC_MODE3,
52363 + GR_FILENAME,
52364 + GR_SYSCTL_HIDDEN,
52365 + GR_NOARGS,
52366 + GR_ONE_INT,
52367 + GR_ONE_INT_TWO_STR,
52368 + GR_ONE_STR,
52369 + GR_STR_INT,
52370 + GR_TWO_STR_INT,
52371 + GR_TWO_INT,
52372 + GR_TWO_U64,
52373 + GR_THREE_INT,
52374 + GR_FIVE_INT_TWO_STR,
52375 + GR_TWO_STR,
52376 + GR_THREE_STR,
52377 + GR_FOUR_STR,
52378 + GR_STR_FILENAME,
52379 + GR_FILENAME_STR,
52380 + GR_FILENAME_TWO_INT,
52381 + GR_FILENAME_TWO_INT_STR,
52382 + GR_TEXTREL,
52383 + GR_PTRACE,
52384 + GR_RESOURCE,
52385 + GR_CAP,
52386 + GR_SIG,
52387 + GR_SIG2,
52388 + GR_CRASH1,
52389 + GR_CRASH2,
52390 + GR_PSACCT,
52391 + GR_RWXMAP
52392 +};
52393 +
52394 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52395 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52396 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52397 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52398 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52399 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52400 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52401 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52402 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52403 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52404 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52405 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52406 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52407 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52408 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52409 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52410 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52411 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52412 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52413 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52414 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52415 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52416 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52417 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52418 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52419 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52420 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52421 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52422 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52423 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52424 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52425 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52426 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52427 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52428 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52429 +
52430 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52431 +
52432 +#endif
52433 +
52434 +#endif
52435 diff -urNp linux-3.0.3/include/linux/grmsg.h linux-3.0.3/include/linux/grmsg.h
52436 --- linux-3.0.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52437 +++ linux-3.0.3/include/linux/grmsg.h 2011-08-23 21:48:14.000000000 -0400
52438 @@ -0,0 +1,108 @@
52439 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52440 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52441 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52442 +#define GR_STOPMOD_MSG "denied modification of module state by "
52443 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52444 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52445 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52446 +#define GR_IOPL_MSG "denied use of iopl() by "
52447 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52448 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52449 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52450 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52451 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52452 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52453 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52454 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52455 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52456 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52457 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52458 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52459 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52460 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52461 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52462 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52463 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52464 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52465 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52466 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52467 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52468 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52469 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52470 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52471 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52472 +#define GR_NPROC_MSG "denied overstep of process limit by "
52473 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52474 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52475 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52476 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52477 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52478 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52479 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52480 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52481 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52482 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52483 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52484 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52485 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52486 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52487 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52488 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52489 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52490 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52491 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52492 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52493 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52494 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52495 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52496 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52497 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52498 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52499 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52500 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52501 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52502 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52503 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52504 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52505 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52506 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52507 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52508 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52509 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52510 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52511 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52512 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52513 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52514 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52515 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52516 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52517 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52518 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52519 +#define GR_TIME_MSG "time set by "
52520 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52521 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52522 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52523 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52524 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52525 +#define GR_BIND_MSG "denied bind() by "
52526 +#define GR_CONNECT_MSG "denied connect() by "
52527 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52528 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52529 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52530 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52531 +#define GR_CAP_ACL_MSG "use of %s denied for "
52532 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52533 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52534 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52535 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52536 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52537 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52538 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52539 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52540 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52541 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52542 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52543 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52544 +#define GR_VM86_MSG "denied use of vm86 by "
52545 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52546 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52547 diff -urNp linux-3.0.3/include/linux/grsecurity.h linux-3.0.3/include/linux/grsecurity.h
52548 --- linux-3.0.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52549 +++ linux-3.0.3/include/linux/grsecurity.h 2011-08-23 21:48:14.000000000 -0400
52550 @@ -0,0 +1,228 @@
52551 +#ifndef GR_SECURITY_H
52552 +#define GR_SECURITY_H
52553 +#include <linux/fs.h>
52554 +#include <linux/fs_struct.h>
52555 +#include <linux/binfmts.h>
52556 +#include <linux/gracl.h>
52557 +
52558 +/* notify of brain-dead configs */
52559 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52560 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52561 +#endif
52562 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52563 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52564 +#endif
52565 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52566 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52567 +#endif
52568 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52569 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52570 +#endif
52571 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52572 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52573 +#endif
52574 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52575 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
52576 +#endif
52577 +
52578 +#include <linux/compat.h>
52579 +
52580 +struct user_arg_ptr {
52581 +#ifdef CONFIG_COMPAT
52582 + bool is_compat;
52583 +#endif
52584 + union {
52585 + const char __user *const __user *native;
52586 +#ifdef CONFIG_COMPAT
52587 + compat_uptr_t __user *compat;
52588 +#endif
52589 + } ptr;
52590 +};
52591 +
52592 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52593 +void gr_handle_brute_check(void);
52594 +void gr_handle_kernel_exploit(void);
52595 +int gr_process_user_ban(void);
52596 +
52597 +char gr_roletype_to_char(void);
52598 +
52599 +int gr_acl_enable_at_secure(void);
52600 +
52601 +int gr_check_user_change(int real, int effective, int fs);
52602 +int gr_check_group_change(int real, int effective, int fs);
52603 +
52604 +void gr_del_task_from_ip_table(struct task_struct *p);
52605 +
52606 +int gr_pid_is_chrooted(struct task_struct *p);
52607 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52608 +int gr_handle_chroot_nice(void);
52609 +int gr_handle_chroot_sysctl(const int op);
52610 +int gr_handle_chroot_setpriority(struct task_struct *p,
52611 + const int niceval);
52612 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52613 +int gr_handle_chroot_chroot(const struct dentry *dentry,
52614 + const struct vfsmount *mnt);
52615 +int gr_handle_chroot_caps(struct path *path);
52616 +void gr_handle_chroot_chdir(struct path *path);
52617 +int gr_handle_chroot_chmod(const struct dentry *dentry,
52618 + const struct vfsmount *mnt, const int mode);
52619 +int gr_handle_chroot_mknod(const struct dentry *dentry,
52620 + const struct vfsmount *mnt, const int mode);
52621 +int gr_handle_chroot_mount(const struct dentry *dentry,
52622 + const struct vfsmount *mnt,
52623 + const char *dev_name);
52624 +int gr_handle_chroot_pivot(void);
52625 +int gr_handle_chroot_unix(const pid_t pid);
52626 +
52627 +int gr_handle_rawio(const struct inode *inode);
52628 +int gr_handle_nproc(void);
52629 +
52630 +void gr_handle_ioperm(void);
52631 +void gr_handle_iopl(void);
52632 +
52633 +int gr_tpe_allow(const struct file *file);
52634 +
52635 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52636 +void gr_clear_chroot_entries(struct task_struct *task);
52637 +
52638 +void gr_log_forkfail(const int retval);
52639 +void gr_log_timechange(void);
52640 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52641 +void gr_log_chdir(const struct dentry *dentry,
52642 + const struct vfsmount *mnt);
52643 +void gr_log_chroot_exec(const struct dentry *dentry,
52644 + const struct vfsmount *mnt);
52645 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52646 +void gr_log_remount(const char *devname, const int retval);
52647 +void gr_log_unmount(const char *devname, const int retval);
52648 +void gr_log_mount(const char *from, const char *to, const int retval);
52649 +void gr_log_textrel(struct vm_area_struct *vma);
52650 +void gr_log_rwxmmap(struct file *file);
52651 +void gr_log_rwxmprotect(struct file *file);
52652 +
52653 +int gr_handle_follow_link(const struct inode *parent,
52654 + const struct inode *inode,
52655 + const struct dentry *dentry,
52656 + const struct vfsmount *mnt);
52657 +int gr_handle_fifo(const struct dentry *dentry,
52658 + const struct vfsmount *mnt,
52659 + const struct dentry *dir, const int flag,
52660 + const int acc_mode);
52661 +int gr_handle_hardlink(const struct dentry *dentry,
52662 + const struct vfsmount *mnt,
52663 + struct inode *inode,
52664 + const int mode, const char *to);
52665 +
52666 +int gr_is_capable(const int cap);
52667 +int gr_is_capable_nolog(const int cap);
52668 +void gr_learn_resource(const struct task_struct *task, const int limit,
52669 + const unsigned long wanted, const int gt);
52670 +void gr_copy_label(struct task_struct *tsk);
52671 +void gr_handle_crash(struct task_struct *task, const int sig);
52672 +int gr_handle_signal(const struct task_struct *p, const int sig);
52673 +int gr_check_crash_uid(const uid_t uid);
52674 +int gr_check_protected_task(const struct task_struct *task);
52675 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52676 +int gr_acl_handle_mmap(const struct file *file,
52677 + const unsigned long prot);
52678 +int gr_acl_handle_mprotect(const struct file *file,
52679 + const unsigned long prot);
52680 +int gr_check_hidden_task(const struct task_struct *tsk);
52681 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52682 + const struct vfsmount *mnt);
52683 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
52684 + const struct vfsmount *mnt);
52685 +__u32 gr_acl_handle_access(const struct dentry *dentry,
52686 + const struct vfsmount *mnt, const int fmode);
52687 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52688 + const struct vfsmount *mnt, mode_t mode);
52689 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52690 + const struct vfsmount *mnt, mode_t mode);
52691 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
52692 + const struct vfsmount *mnt);
52693 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52694 + const struct vfsmount *mnt);
52695 +int gr_handle_ptrace(struct task_struct *task, const long request);
52696 +int gr_handle_proc_ptrace(struct task_struct *task);
52697 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
52698 + const struct vfsmount *mnt);
52699 +int gr_check_crash_exec(const struct file *filp);
52700 +int gr_acl_is_enabled(void);
52701 +void gr_set_kernel_label(struct task_struct *task);
52702 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
52703 + const gid_t gid);
52704 +int gr_set_proc_label(const struct dentry *dentry,
52705 + const struct vfsmount *mnt,
52706 + const int unsafe_share);
52707 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
52708 + const struct vfsmount *mnt);
52709 +__u32 gr_acl_handle_open(const struct dentry *dentry,
52710 + const struct vfsmount *mnt, const int fmode);
52711 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
52712 + const struct dentry *p_dentry,
52713 + const struct vfsmount *p_mnt, const int fmode,
52714 + const int imode);
52715 +void gr_handle_create(const struct dentry *dentry,
52716 + const struct vfsmount *mnt);
52717 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
52718 + const struct dentry *parent_dentry,
52719 + const struct vfsmount *parent_mnt,
52720 + const int mode);
52721 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
52722 + const struct dentry *parent_dentry,
52723 + const struct vfsmount *parent_mnt);
52724 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
52725 + const struct vfsmount *mnt);
52726 +void gr_handle_delete(const ino_t ino, const dev_t dev);
52727 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
52728 + const struct vfsmount *mnt);
52729 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
52730 + const struct dentry *parent_dentry,
52731 + const struct vfsmount *parent_mnt,
52732 + const char *from);
52733 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
52734 + const struct dentry *parent_dentry,
52735 + const struct vfsmount *parent_mnt,
52736 + const struct dentry *old_dentry,
52737 + const struct vfsmount *old_mnt, const char *to);
52738 +int gr_acl_handle_rename(struct dentry *new_dentry,
52739 + struct dentry *parent_dentry,
52740 + const struct vfsmount *parent_mnt,
52741 + struct dentry *old_dentry,
52742 + struct inode *old_parent_inode,
52743 + struct vfsmount *old_mnt, const char *newname);
52744 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52745 + struct dentry *old_dentry,
52746 + struct dentry *new_dentry,
52747 + struct vfsmount *mnt, const __u8 replace);
52748 +__u32 gr_check_link(const struct dentry *new_dentry,
52749 + const struct dentry *parent_dentry,
52750 + const struct vfsmount *parent_mnt,
52751 + const struct dentry *old_dentry,
52752 + const struct vfsmount *old_mnt);
52753 +int gr_acl_handle_filldir(const struct file *file, const char *name,
52754 + const unsigned int namelen, const ino_t ino);
52755 +
52756 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
52757 + const struct vfsmount *mnt);
52758 +void gr_acl_handle_exit(void);
52759 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
52760 +int gr_acl_handle_procpidmem(const struct task_struct *task);
52761 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
52762 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
52763 +void gr_audit_ptrace(struct task_struct *task);
52764 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
52765 +
52766 +#ifdef CONFIG_GRKERNSEC
52767 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
52768 +void gr_handle_vm86(void);
52769 +void gr_handle_mem_readwrite(u64 from, u64 to);
52770 +
52771 +extern int grsec_enable_dmesg;
52772 +extern int grsec_disable_privio;
52773 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52774 +extern int grsec_enable_chroot_findtask;
52775 +#endif
52776 +#endif
52777 +
52778 +#endif
52779 diff -urNp linux-3.0.3/include/linux/grsock.h linux-3.0.3/include/linux/grsock.h
52780 --- linux-3.0.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
52781 +++ linux-3.0.3/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
52782 @@ -0,0 +1,19 @@
52783 +#ifndef __GRSOCK_H
52784 +#define __GRSOCK_H
52785 +
52786 +extern void gr_attach_curr_ip(const struct sock *sk);
52787 +extern int gr_handle_sock_all(const int family, const int type,
52788 + const int protocol);
52789 +extern int gr_handle_sock_server(const struct sockaddr *sck);
52790 +extern int gr_handle_sock_server_other(const struct sock *sck);
52791 +extern int gr_handle_sock_client(const struct sockaddr *sck);
52792 +extern int gr_search_connect(struct socket * sock,
52793 + struct sockaddr_in * addr);
52794 +extern int gr_search_bind(struct socket * sock,
52795 + struct sockaddr_in * addr);
52796 +extern int gr_search_listen(struct socket * sock);
52797 +extern int gr_search_accept(struct socket * sock);
52798 +extern int gr_search_socket(const int domain, const int type,
52799 + const int protocol);
52800 +
52801 +#endif
52802 diff -urNp linux-3.0.3/include/linux/hid.h linux-3.0.3/include/linux/hid.h
52803 --- linux-3.0.3/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
52804 +++ linux-3.0.3/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
52805 @@ -675,7 +675,7 @@ struct hid_ll_driver {
52806 unsigned int code, int value);
52807
52808 int (*parse)(struct hid_device *hdev);
52809 -};
52810 +} __no_const;
52811
52812 #define PM_HINT_FULLON 1<<5
52813 #define PM_HINT_NORMAL 1<<1
52814 diff -urNp linux-3.0.3/include/linux/highmem.h linux-3.0.3/include/linux/highmem.h
52815 --- linux-3.0.3/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
52816 +++ linux-3.0.3/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
52817 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
52818 kunmap_atomic(kaddr, KM_USER0);
52819 }
52820
52821 +static inline void sanitize_highpage(struct page *page)
52822 +{
52823 + void *kaddr;
52824 + unsigned long flags;
52825 +
52826 + local_irq_save(flags);
52827 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
52828 + clear_page(kaddr);
52829 + kunmap_atomic(kaddr, KM_CLEARPAGE);
52830 + local_irq_restore(flags);
52831 +}
52832 +
52833 static inline void zero_user_segments(struct page *page,
52834 unsigned start1, unsigned end1,
52835 unsigned start2, unsigned end2)
52836 diff -urNp linux-3.0.3/include/linux/i2c.h linux-3.0.3/include/linux/i2c.h
52837 --- linux-3.0.3/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
52838 +++ linux-3.0.3/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
52839 @@ -346,6 +346,7 @@ struct i2c_algorithm {
52840 /* To determine what the adapter supports */
52841 u32 (*functionality) (struct i2c_adapter *);
52842 };
52843 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
52844
52845 /*
52846 * i2c_adapter is the structure used to identify a physical i2c bus along
52847 diff -urNp linux-3.0.3/include/linux/i2o.h linux-3.0.3/include/linux/i2o.h
52848 --- linux-3.0.3/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
52849 +++ linux-3.0.3/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
52850 @@ -564,7 +564,7 @@ struct i2o_controller {
52851 struct i2o_device *exec; /* Executive */
52852 #if BITS_PER_LONG == 64
52853 spinlock_t context_list_lock; /* lock for context_list */
52854 - atomic_t context_list_counter; /* needed for unique contexts */
52855 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
52856 struct list_head context_list; /* list of context id's
52857 and pointers */
52858 #endif
52859 diff -urNp linux-3.0.3/include/linux/init.h linux-3.0.3/include/linux/init.h
52860 --- linux-3.0.3/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
52861 +++ linux-3.0.3/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
52862 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
52863
52864 /* Each module must use one module_init(). */
52865 #define module_init(initfn) \
52866 - static inline initcall_t __inittest(void) \
52867 + static inline __used initcall_t __inittest(void) \
52868 { return initfn; } \
52869 int init_module(void) __attribute__((alias(#initfn)));
52870
52871 /* This is only required if you want to be unloadable. */
52872 #define module_exit(exitfn) \
52873 - static inline exitcall_t __exittest(void) \
52874 + static inline __used exitcall_t __exittest(void) \
52875 { return exitfn; } \
52876 void cleanup_module(void) __attribute__((alias(#exitfn)));
52877
52878 diff -urNp linux-3.0.3/include/linux/init_task.h linux-3.0.3/include/linux/init_task.h
52879 --- linux-3.0.3/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
52880 +++ linux-3.0.3/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
52881 @@ -126,6 +126,12 @@ extern struct cred init_cred;
52882 # define INIT_PERF_EVENTS(tsk)
52883 #endif
52884
52885 +#ifdef CONFIG_X86
52886 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
52887 +#else
52888 +#define INIT_TASK_THREAD_INFO
52889 +#endif
52890 +
52891 /*
52892 * INIT_TASK is used to set up the first task table, touch at
52893 * your own risk!. Base=0, limit=0x1fffff (=2MB)
52894 @@ -164,6 +170,7 @@ extern struct cred init_cred;
52895 RCU_INIT_POINTER(.cred, &init_cred), \
52896 .comm = "swapper", \
52897 .thread = INIT_THREAD, \
52898 + INIT_TASK_THREAD_INFO \
52899 .fs = &init_fs, \
52900 .files = &init_files, \
52901 .signal = &init_signals, \
52902 diff -urNp linux-3.0.3/include/linux/intel-iommu.h linux-3.0.3/include/linux/intel-iommu.h
52903 --- linux-3.0.3/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
52904 +++ linux-3.0.3/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
52905 @@ -296,7 +296,7 @@ struct iommu_flush {
52906 u8 fm, u64 type);
52907 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
52908 unsigned int size_order, u64 type);
52909 -};
52910 +} __no_const;
52911
52912 enum {
52913 SR_DMAR_FECTL_REG,
52914 diff -urNp linux-3.0.3/include/linux/interrupt.h linux-3.0.3/include/linux/interrupt.h
52915 --- linux-3.0.3/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
52916 +++ linux-3.0.3/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
52917 @@ -422,7 +422,7 @@ enum
52918 /* map softirq index to softirq name. update 'softirq_to_name' in
52919 * kernel/softirq.c when adding a new softirq.
52920 */
52921 -extern char *softirq_to_name[NR_SOFTIRQS];
52922 +extern const char * const softirq_to_name[NR_SOFTIRQS];
52923
52924 /* softirq mask and active fields moved to irq_cpustat_t in
52925 * asm/hardirq.h to get better cache usage. KAO
52926 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
52927
52928 struct softirq_action
52929 {
52930 - void (*action)(struct softirq_action *);
52931 + void (*action)(void);
52932 };
52933
52934 asmlinkage void do_softirq(void);
52935 asmlinkage void __do_softirq(void);
52936 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
52937 +extern void open_softirq(int nr, void (*action)(void));
52938 extern void softirq_init(void);
52939 static inline void __raise_softirq_irqoff(unsigned int nr)
52940 {
52941 diff -urNp linux-3.0.3/include/linux/kallsyms.h linux-3.0.3/include/linux/kallsyms.h
52942 --- linux-3.0.3/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
52943 +++ linux-3.0.3/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
52944 @@ -15,7 +15,8 @@
52945
52946 struct module;
52947
52948 -#ifdef CONFIG_KALLSYMS
52949 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
52950 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52951 /* Lookup the address for a symbol. Returns 0 if not found. */
52952 unsigned long kallsyms_lookup_name(const char *name);
52953
52954 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
52955 /* Stupid that this does nothing, but I didn't create this mess. */
52956 #define __print_symbol(fmt, addr)
52957 #endif /*CONFIG_KALLSYMS*/
52958 +#else /* when included by kallsyms.c, vsnprintf.c, or
52959 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
52960 +extern void __print_symbol(const char *fmt, unsigned long address);
52961 +extern int sprint_backtrace(char *buffer, unsigned long address);
52962 +extern int sprint_symbol(char *buffer, unsigned long address);
52963 +const char *kallsyms_lookup(unsigned long addr,
52964 + unsigned long *symbolsize,
52965 + unsigned long *offset,
52966 + char **modname, char *namebuf);
52967 +#endif
52968
52969 /* This macro allows us to keep printk typechecking */
52970 static void __check_printsym_format(const char *fmt, ...)
52971 diff -urNp linux-3.0.3/include/linux/kgdb.h linux-3.0.3/include/linux/kgdb.h
52972 --- linux-3.0.3/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
52973 +++ linux-3.0.3/include/linux/kgdb.h 2011-08-23 21:47:56.000000000 -0400
52974 @@ -53,7 +53,7 @@ extern int kgdb_connected;
52975 extern int kgdb_io_module_registered;
52976
52977 extern atomic_t kgdb_setting_breakpoint;
52978 -extern atomic_t kgdb_cpu_doing_single_step;
52979 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
52980
52981 extern struct task_struct *kgdb_usethread;
52982 extern struct task_struct *kgdb_contthread;
52983 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
52984 * hardware debug registers.
52985 */
52986 struct kgdb_arch {
52987 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
52988 - unsigned long flags;
52989 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
52990 + const unsigned long flags;
52991
52992 int (*set_breakpoint)(unsigned long, char *);
52993 int (*remove_breakpoint)(unsigned long, char *);
52994 @@ -268,14 +268,14 @@ struct kgdb_arch {
52995 * not a console
52996 */
52997 struct kgdb_io {
52998 - const char *name;
52999 + const char * const name;
53000 int (*read_char) (void);
53001 void (*write_char) (u8);
53002 void (*flush) (void);
53003 int (*init) (void);
53004 void (*pre_exception) (void);
53005 void (*post_exception) (void);
53006 - int is_console;
53007 + const int is_console;
53008 };
53009
53010 extern struct kgdb_arch arch_kgdb_ops;
53011 diff -urNp linux-3.0.3/include/linux/kmod.h linux-3.0.3/include/linux/kmod.h
53012 --- linux-3.0.3/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53013 +++ linux-3.0.3/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53014 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53015 * usually useless though. */
53016 extern int __request_module(bool wait, const char *name, ...) \
53017 __attribute__((format(printf, 2, 3)));
53018 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53019 + __attribute__((format(printf, 3, 4)));
53020 #define request_module(mod...) __request_module(true, mod)
53021 #define request_module_nowait(mod...) __request_module(false, mod)
53022 #define try_then_request_module(x, mod...) \
53023 diff -urNp linux-3.0.3/include/linux/kvm_host.h linux-3.0.3/include/linux/kvm_host.h
53024 --- linux-3.0.3/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53025 +++ linux-3.0.3/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53026 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53027 void vcpu_load(struct kvm_vcpu *vcpu);
53028 void vcpu_put(struct kvm_vcpu *vcpu);
53029
53030 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53031 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53032 struct module *module);
53033 void kvm_exit(void);
53034
53035 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53036 struct kvm_guest_debug *dbg);
53037 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53038
53039 -int kvm_arch_init(void *opaque);
53040 +int kvm_arch_init(const void *opaque);
53041 void kvm_arch_exit(void);
53042
53043 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53044 diff -urNp linux-3.0.3/include/linux/libata.h linux-3.0.3/include/linux/libata.h
53045 --- linux-3.0.3/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53046 +++ linux-3.0.3/include/linux/libata.h 2011-08-23 21:47:56.000000000 -0400
53047 @@ -898,7 +898,7 @@ struct ata_port_operations {
53048 * ->inherits must be the last field and all the preceding
53049 * fields must be pointers.
53050 */
53051 - const struct ata_port_operations *inherits;
53052 + const struct ata_port_operations * const inherits;
53053 };
53054
53055 struct ata_port_info {
53056 diff -urNp linux-3.0.3/include/linux/mca.h linux-3.0.3/include/linux/mca.h
53057 --- linux-3.0.3/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53058 +++ linux-3.0.3/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53059 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53060 int region);
53061 void * (*mca_transform_memory)(struct mca_device *,
53062 void *memory);
53063 -};
53064 +} __no_const;
53065
53066 struct mca_bus {
53067 u64 default_dma_mask;
53068 diff -urNp linux-3.0.3/include/linux/memory.h linux-3.0.3/include/linux/memory.h
53069 --- linux-3.0.3/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53070 +++ linux-3.0.3/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53071 @@ -144,7 +144,7 @@ struct memory_accessor {
53072 size_t count);
53073 ssize_t (*write)(struct memory_accessor *, const char *buf,
53074 off_t offset, size_t count);
53075 -};
53076 +} __no_const;
53077
53078 /*
53079 * Kernel text modification mutex, used for code patching. Users of this lock
53080 diff -urNp linux-3.0.3/include/linux/mfd/abx500.h linux-3.0.3/include/linux/mfd/abx500.h
53081 --- linux-3.0.3/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53082 +++ linux-3.0.3/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53083 @@ -234,6 +234,7 @@ struct abx500_ops {
53084 int (*event_registers_startup_state_get) (struct device *, u8 *);
53085 int (*startup_irq_enabled) (struct device *, unsigned int);
53086 };
53087 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53088
53089 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53090 void abx500_remove_ops(struct device *dev);
53091 diff -urNp linux-3.0.3/include/linux/mm.h linux-3.0.3/include/linux/mm.h
53092 --- linux-3.0.3/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53093 +++ linux-3.0.3/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53094 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53095
53096 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53097 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53098 +
53099 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53100 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53101 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53102 +#else
53103 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53104 +#endif
53105 +
53106 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53107 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53108
53109 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53110 int set_page_dirty_lock(struct page *page);
53111 int clear_page_dirty_for_io(struct page *page);
53112
53113 -/* Is the vma a continuation of the stack vma above it? */
53114 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53115 -{
53116 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53117 -}
53118 -
53119 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53120 - unsigned long addr)
53121 -{
53122 - return (vma->vm_flags & VM_GROWSDOWN) &&
53123 - (vma->vm_start == addr) &&
53124 - !vma_growsdown(vma->vm_prev, addr);
53125 -}
53126 -
53127 -/* Is the vma a continuation of the stack vma below it? */
53128 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53129 -{
53130 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53131 -}
53132 -
53133 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53134 - unsigned long addr)
53135 -{
53136 - return (vma->vm_flags & VM_GROWSUP) &&
53137 - (vma->vm_end == addr) &&
53138 - !vma_growsup(vma->vm_next, addr);
53139 -}
53140 -
53141 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53142 unsigned long old_addr, struct vm_area_struct *new_vma,
53143 unsigned long new_addr, unsigned long len);
53144 @@ -1169,6 +1148,15 @@ struct shrinker {
53145 extern void register_shrinker(struct shrinker *);
53146 extern void unregister_shrinker(struct shrinker *);
53147
53148 +#ifdef CONFIG_MMU
53149 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53150 +#else
53151 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53152 +{
53153 + return __pgprot(0);
53154 +}
53155 +#endif
53156 +
53157 int vma_wants_writenotify(struct vm_area_struct *vma);
53158
53159 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53160 @@ -1452,6 +1440,7 @@ out:
53161 }
53162
53163 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53164 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53165
53166 extern unsigned long do_brk(unsigned long, unsigned long);
53167
53168 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53169 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53170 struct vm_area_struct **pprev);
53171
53172 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53173 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53174 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53175 +
53176 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53177 NULL if none. Assume start_addr < end_addr. */
53178 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53179 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53180 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53181 }
53182
53183 -#ifdef CONFIG_MMU
53184 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53185 -#else
53186 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53187 -{
53188 - return __pgprot(0);
53189 -}
53190 -#endif
53191 -
53192 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53193 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53194 unsigned long pfn, unsigned long size, pgprot_t);
53195 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53196 extern int sysctl_memory_failure_early_kill;
53197 extern int sysctl_memory_failure_recovery;
53198 extern void shake_page(struct page *p, int access);
53199 -extern atomic_long_t mce_bad_pages;
53200 +extern atomic_long_unchecked_t mce_bad_pages;
53201 extern int soft_offline_page(struct page *page, int flags);
53202
53203 extern void dump_page(struct page *page);
53204 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53205 unsigned int pages_per_huge_page);
53206 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53207
53208 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53209 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53210 +#else
53211 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53212 +#endif
53213 +
53214 #endif /* __KERNEL__ */
53215 #endif /* _LINUX_MM_H */
53216 diff -urNp linux-3.0.3/include/linux/mm_types.h linux-3.0.3/include/linux/mm_types.h
53217 --- linux-3.0.3/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53218 +++ linux-3.0.3/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53219 @@ -184,6 +184,8 @@ struct vm_area_struct {
53220 #ifdef CONFIG_NUMA
53221 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53222 #endif
53223 +
53224 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53225 };
53226
53227 struct core_thread {
53228 @@ -316,6 +318,24 @@ struct mm_struct {
53229 #ifdef CONFIG_CPUMASK_OFFSTACK
53230 struct cpumask cpumask_allocation;
53231 #endif
53232 +
53233 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53234 + unsigned long pax_flags;
53235 +#endif
53236 +
53237 +#ifdef CONFIG_PAX_DLRESOLVE
53238 + unsigned long call_dl_resolve;
53239 +#endif
53240 +
53241 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53242 + unsigned long call_syscall;
53243 +#endif
53244 +
53245 +#ifdef CONFIG_PAX_ASLR
53246 + unsigned long delta_mmap; /* randomized offset */
53247 + unsigned long delta_stack; /* randomized offset */
53248 +#endif
53249 +
53250 };
53251
53252 static inline void mm_init_cpumask(struct mm_struct *mm)
53253 diff -urNp linux-3.0.3/include/linux/mmu_notifier.h linux-3.0.3/include/linux/mmu_notifier.h
53254 --- linux-3.0.3/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53255 +++ linux-3.0.3/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53256 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53257 */
53258 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53259 ({ \
53260 - pte_t __pte; \
53261 + pte_t ___pte; \
53262 struct vm_area_struct *___vma = __vma; \
53263 unsigned long ___address = __address; \
53264 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53265 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53266 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53267 - __pte; \
53268 + ___pte; \
53269 })
53270
53271 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53272 diff -urNp linux-3.0.3/include/linux/mmzone.h linux-3.0.3/include/linux/mmzone.h
53273 --- linux-3.0.3/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53274 +++ linux-3.0.3/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53275 @@ -350,7 +350,7 @@ struct zone {
53276 unsigned long flags; /* zone flags, see below */
53277
53278 /* Zone statistics */
53279 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53280 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53281
53282 /*
53283 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53284 diff -urNp linux-3.0.3/include/linux/mod_devicetable.h linux-3.0.3/include/linux/mod_devicetable.h
53285 --- linux-3.0.3/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53286 +++ linux-3.0.3/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53287 @@ -12,7 +12,7 @@
53288 typedef unsigned long kernel_ulong_t;
53289 #endif
53290
53291 -#define PCI_ANY_ID (~0)
53292 +#define PCI_ANY_ID ((__u16)~0)
53293
53294 struct pci_device_id {
53295 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53296 @@ -131,7 +131,7 @@ struct usb_device_id {
53297 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53298 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53299
53300 -#define HID_ANY_ID (~0)
53301 +#define HID_ANY_ID (~0U)
53302
53303 struct hid_device_id {
53304 __u16 bus;
53305 diff -urNp linux-3.0.3/include/linux/module.h linux-3.0.3/include/linux/module.h
53306 --- linux-3.0.3/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53307 +++ linux-3.0.3/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53308 @@ -16,6 +16,7 @@
53309 #include <linux/kobject.h>
53310 #include <linux/moduleparam.h>
53311 #include <linux/tracepoint.h>
53312 +#include <linux/fs.h>
53313
53314 #include <linux/percpu.h>
53315 #include <asm/module.h>
53316 @@ -325,19 +326,16 @@ struct module
53317 int (*init)(void);
53318
53319 /* If this is non-NULL, vfree after init() returns */
53320 - void *module_init;
53321 + void *module_init_rx, *module_init_rw;
53322
53323 /* Here is the actual code + data, vfree'd on unload. */
53324 - void *module_core;
53325 + void *module_core_rx, *module_core_rw;
53326
53327 /* Here are the sizes of the init and core sections */
53328 - unsigned int init_size, core_size;
53329 + unsigned int init_size_rw, core_size_rw;
53330
53331 /* The size of the executable code in each section. */
53332 - unsigned int init_text_size, core_text_size;
53333 -
53334 - /* Size of RO sections of the module (text+rodata) */
53335 - unsigned int init_ro_size, core_ro_size;
53336 + unsigned int init_size_rx, core_size_rx;
53337
53338 /* Arch-specific module values */
53339 struct mod_arch_specific arch;
53340 @@ -393,6 +391,10 @@ struct module
53341 #ifdef CONFIG_EVENT_TRACING
53342 struct ftrace_event_call **trace_events;
53343 unsigned int num_trace_events;
53344 + struct file_operations trace_id;
53345 + struct file_operations trace_enable;
53346 + struct file_operations trace_format;
53347 + struct file_operations trace_filter;
53348 #endif
53349 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53350 unsigned int num_ftrace_callsites;
53351 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53352 bool is_module_percpu_address(unsigned long addr);
53353 bool is_module_text_address(unsigned long addr);
53354
53355 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53356 +{
53357 +
53358 +#ifdef CONFIG_PAX_KERNEXEC
53359 + if (ktla_ktva(addr) >= (unsigned long)start &&
53360 + ktla_ktva(addr) < (unsigned long)start + size)
53361 + return 1;
53362 +#endif
53363 +
53364 + return ((void *)addr >= start && (void *)addr < start + size);
53365 +}
53366 +
53367 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53368 +{
53369 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53370 +}
53371 +
53372 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53373 +{
53374 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53375 +}
53376 +
53377 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53378 +{
53379 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53380 +}
53381 +
53382 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53383 +{
53384 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53385 +}
53386 +
53387 static inline int within_module_core(unsigned long addr, struct module *mod)
53388 {
53389 - return (unsigned long)mod->module_core <= addr &&
53390 - addr < (unsigned long)mod->module_core + mod->core_size;
53391 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53392 }
53393
53394 static inline int within_module_init(unsigned long addr, struct module *mod)
53395 {
53396 - return (unsigned long)mod->module_init <= addr &&
53397 - addr < (unsigned long)mod->module_init + mod->init_size;
53398 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53399 }
53400
53401 /* Search for module by name: must hold module_mutex. */
53402 diff -urNp linux-3.0.3/include/linux/moduleloader.h linux-3.0.3/include/linux/moduleloader.h
53403 --- linux-3.0.3/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53404 +++ linux-3.0.3/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53405 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53406 sections. Returns NULL on failure. */
53407 void *module_alloc(unsigned long size);
53408
53409 +#ifdef CONFIG_PAX_KERNEXEC
53410 +void *module_alloc_exec(unsigned long size);
53411 +#else
53412 +#define module_alloc_exec(x) module_alloc(x)
53413 +#endif
53414 +
53415 /* Free memory returned from module_alloc. */
53416 void module_free(struct module *mod, void *module_region);
53417
53418 +#ifdef CONFIG_PAX_KERNEXEC
53419 +void module_free_exec(struct module *mod, void *module_region);
53420 +#else
53421 +#define module_free_exec(x, y) module_free((x), (y))
53422 +#endif
53423 +
53424 /* Apply the given relocation to the (simplified) ELF. Return -error
53425 or 0. */
53426 int apply_relocate(Elf_Shdr *sechdrs,
53427 diff -urNp linux-3.0.3/include/linux/moduleparam.h linux-3.0.3/include/linux/moduleparam.h
53428 --- linux-3.0.3/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53429 +++ linux-3.0.3/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53430 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53431 * @len is usually just sizeof(string).
53432 */
53433 #define module_param_string(name, string, len, perm) \
53434 - static const struct kparam_string __param_string_##name \
53435 + static const struct kparam_string __param_string_##name __used \
53436 = { len, string }; \
53437 __module_param_call(MODULE_PARAM_PREFIX, name, \
53438 &param_ops_string, \
53439 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53440 * module_param_named() for why this might be necessary.
53441 */
53442 #define module_param_array_named(name, array, type, nump, perm) \
53443 - static const struct kparam_array __param_arr_##name \
53444 + static const struct kparam_array __param_arr_##name __used \
53445 = { .max = ARRAY_SIZE(array), .num = nump, \
53446 .ops = &param_ops_##type, \
53447 .elemsize = sizeof(array[0]), .elem = array }; \
53448 diff -urNp linux-3.0.3/include/linux/namei.h linux-3.0.3/include/linux/namei.h
53449 --- linux-3.0.3/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53450 +++ linux-3.0.3/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53451 @@ -24,7 +24,7 @@ struct nameidata {
53452 unsigned seq;
53453 int last_type;
53454 unsigned depth;
53455 - char *saved_names[MAX_NESTED_LINKS + 1];
53456 + const char *saved_names[MAX_NESTED_LINKS + 1];
53457
53458 /* Intent data */
53459 union {
53460 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53461 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53462 extern void unlock_rename(struct dentry *, struct dentry *);
53463
53464 -static inline void nd_set_link(struct nameidata *nd, char *path)
53465 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53466 {
53467 nd->saved_names[nd->depth] = path;
53468 }
53469
53470 -static inline char *nd_get_link(struct nameidata *nd)
53471 +static inline const char *nd_get_link(const struct nameidata *nd)
53472 {
53473 return nd->saved_names[nd->depth];
53474 }
53475 diff -urNp linux-3.0.3/include/linux/netdevice.h linux-3.0.3/include/linux/netdevice.h
53476 --- linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53477 +++ linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53478 @@ -979,6 +979,7 @@ struct net_device_ops {
53479 int (*ndo_set_features)(struct net_device *dev,
53480 u32 features);
53481 };
53482 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53483
53484 /*
53485 * The DEVICE structure.
53486 diff -urNp linux-3.0.3/include/linux/netfilter/xt_gradm.h linux-3.0.3/include/linux/netfilter/xt_gradm.h
53487 --- linux-3.0.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53488 +++ linux-3.0.3/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53489 @@ -0,0 +1,9 @@
53490 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53491 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53492 +
53493 +struct xt_gradm_mtinfo {
53494 + __u16 flags;
53495 + __u16 invflags;
53496 +};
53497 +
53498 +#endif
53499 diff -urNp linux-3.0.3/include/linux/oprofile.h linux-3.0.3/include/linux/oprofile.h
53500 --- linux-3.0.3/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53501 +++ linux-3.0.3/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53502 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53503 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53504 char const * name, ulong * val);
53505
53506 -/** Create a file for read-only access to an atomic_t. */
53507 +/** Create a file for read-only access to an atomic_unchecked_t. */
53508 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53509 - char const * name, atomic_t * val);
53510 + char const * name, atomic_unchecked_t * val);
53511
53512 /** create a directory */
53513 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53514 diff -urNp linux-3.0.3/include/linux/padata.h linux-3.0.3/include/linux/padata.h
53515 --- linux-3.0.3/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53516 +++ linux-3.0.3/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53517 @@ -129,7 +129,7 @@ struct parallel_data {
53518 struct padata_instance *pinst;
53519 struct padata_parallel_queue __percpu *pqueue;
53520 struct padata_serial_queue __percpu *squeue;
53521 - atomic_t seq_nr;
53522 + atomic_unchecked_t seq_nr;
53523 atomic_t reorder_objects;
53524 atomic_t refcnt;
53525 unsigned int max_seq_nr;
53526 diff -urNp linux-3.0.3/include/linux/perf_event.h linux-3.0.3/include/linux/perf_event.h
53527 --- linux-3.0.3/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53528 +++ linux-3.0.3/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53529 @@ -761,8 +761,8 @@ struct perf_event {
53530
53531 enum perf_event_active_state state;
53532 unsigned int attach_state;
53533 - local64_t count;
53534 - atomic64_t child_count;
53535 + local64_t count; /* PaX: fix it one day */
53536 + atomic64_unchecked_t child_count;
53537
53538 /*
53539 * These are the total time in nanoseconds that the event
53540 @@ -813,8 +813,8 @@ struct perf_event {
53541 * These accumulate total time (in nanoseconds) that children
53542 * events have been enabled and running, respectively.
53543 */
53544 - atomic64_t child_total_time_enabled;
53545 - atomic64_t child_total_time_running;
53546 + atomic64_unchecked_t child_total_time_enabled;
53547 + atomic64_unchecked_t child_total_time_running;
53548
53549 /*
53550 * Protect attach/detach and child_list:
53551 diff -urNp linux-3.0.3/include/linux/pipe_fs_i.h linux-3.0.3/include/linux/pipe_fs_i.h
53552 --- linux-3.0.3/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53553 +++ linux-3.0.3/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53554 @@ -46,9 +46,9 @@ struct pipe_buffer {
53555 struct pipe_inode_info {
53556 wait_queue_head_t wait;
53557 unsigned int nrbufs, curbuf, buffers;
53558 - unsigned int readers;
53559 - unsigned int writers;
53560 - unsigned int waiting_writers;
53561 + atomic_t readers;
53562 + atomic_t writers;
53563 + atomic_t waiting_writers;
53564 unsigned int r_counter;
53565 unsigned int w_counter;
53566 struct page *tmp_page;
53567 diff -urNp linux-3.0.3/include/linux/pm_runtime.h linux-3.0.3/include/linux/pm_runtime.h
53568 --- linux-3.0.3/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53569 +++ linux-3.0.3/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53570 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53571
53572 static inline void pm_runtime_mark_last_busy(struct device *dev)
53573 {
53574 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
53575 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53576 }
53577
53578 #else /* !CONFIG_PM_RUNTIME */
53579 diff -urNp linux-3.0.3/include/linux/poison.h linux-3.0.3/include/linux/poison.h
53580 --- linux-3.0.3/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53581 +++ linux-3.0.3/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53582 @@ -19,8 +19,8 @@
53583 * under normal circumstances, used to verify that nobody uses
53584 * non-initialized list entries.
53585 */
53586 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53587 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53588 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53589 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53590
53591 /********** include/linux/timer.h **********/
53592 /*
53593 diff -urNp linux-3.0.3/include/linux/preempt.h linux-3.0.3/include/linux/preempt.h
53594 --- linux-3.0.3/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53595 +++ linux-3.0.3/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53596 @@ -115,7 +115,7 @@ struct preempt_ops {
53597 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53598 void (*sched_out)(struct preempt_notifier *notifier,
53599 struct task_struct *next);
53600 -};
53601 +} __no_const;
53602
53603 /**
53604 * preempt_notifier - key for installing preemption notifiers
53605 diff -urNp linux-3.0.3/include/linux/proc_fs.h linux-3.0.3/include/linux/proc_fs.h
53606 --- linux-3.0.3/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53607 +++ linux-3.0.3/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53608 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53609 return proc_create_data(name, mode, parent, proc_fops, NULL);
53610 }
53611
53612 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53613 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53614 +{
53615 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53616 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53617 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53618 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53619 +#else
53620 + return proc_create_data(name, mode, parent, proc_fops, NULL);
53621 +#endif
53622 +}
53623 +
53624 +
53625 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53626 mode_t mode, struct proc_dir_entry *base,
53627 read_proc_t *read_proc, void * data)
53628 @@ -258,7 +271,7 @@ union proc_op {
53629 int (*proc_show)(struct seq_file *m,
53630 struct pid_namespace *ns, struct pid *pid,
53631 struct task_struct *task);
53632 -};
53633 +} __no_const;
53634
53635 struct ctl_table_header;
53636 struct ctl_table;
53637 diff -urNp linux-3.0.3/include/linux/ptrace.h linux-3.0.3/include/linux/ptrace.h
53638 --- linux-3.0.3/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53639 +++ linux-3.0.3/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53640 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53641 extern void exit_ptrace(struct task_struct *tracer);
53642 #define PTRACE_MODE_READ 1
53643 #define PTRACE_MODE_ATTACH 2
53644 -/* Returns 0 on success, -errno on denial. */
53645 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53646 /* Returns true on success, false on denial. */
53647 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53648 +/* Returns true on success, false on denial. */
53649 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53650
53651 static inline int ptrace_reparented(struct task_struct *child)
53652 {
53653 diff -urNp linux-3.0.3/include/linux/random.h linux-3.0.3/include/linux/random.h
53654 --- linux-3.0.3/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53655 +++ linux-3.0.3/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53656 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
53657
53658 u32 prandom32(struct rnd_state *);
53659
53660 +static inline unsigned long pax_get_random_long(void)
53661 +{
53662 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53663 +}
53664 +
53665 /*
53666 * Handle minimum values for seeds
53667 */
53668 static inline u32 __seed(u32 x, u32 m)
53669 {
53670 - return (x < m) ? x + m : x;
53671 + return (x <= m) ? x + m + 1 : x;
53672 }
53673
53674 /**
53675 diff -urNp linux-3.0.3/include/linux/reboot.h linux-3.0.3/include/linux/reboot.h
53676 --- linux-3.0.3/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53677 +++ linux-3.0.3/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53678 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53679 * Architecture-specific implementations of sys_reboot commands.
53680 */
53681
53682 -extern void machine_restart(char *cmd);
53683 -extern void machine_halt(void);
53684 -extern void machine_power_off(void);
53685 +extern void machine_restart(char *cmd) __noreturn;
53686 +extern void machine_halt(void) __noreturn;
53687 +extern void machine_power_off(void) __noreturn;
53688
53689 extern void machine_shutdown(void);
53690 struct pt_regs;
53691 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53692 */
53693
53694 extern void kernel_restart_prepare(char *cmd);
53695 -extern void kernel_restart(char *cmd);
53696 -extern void kernel_halt(void);
53697 -extern void kernel_power_off(void);
53698 +extern void kernel_restart(char *cmd) __noreturn;
53699 +extern void kernel_halt(void) __noreturn;
53700 +extern void kernel_power_off(void) __noreturn;
53701
53702 extern int C_A_D; /* for sysctl */
53703 void ctrl_alt_del(void);
53704 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53705 * Emergency restart, callable from an interrupt handler.
53706 */
53707
53708 -extern void emergency_restart(void);
53709 +extern void emergency_restart(void) __noreturn;
53710 #include <asm/emergency-restart.h>
53711
53712 #endif
53713 diff -urNp linux-3.0.3/include/linux/reiserfs_fs.h linux-3.0.3/include/linux/reiserfs_fs.h
53714 --- linux-3.0.3/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53715 +++ linux-3.0.3/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53716 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53717 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
53718
53719 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
53720 -#define get_generation(s) atomic_read (&fs_generation(s))
53721 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
53722 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
53723 #define __fs_changed(gen,s) (gen != get_generation (s))
53724 #define fs_changed(gen,s) \
53725 diff -urNp linux-3.0.3/include/linux/reiserfs_fs_sb.h linux-3.0.3/include/linux/reiserfs_fs_sb.h
53726 --- linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
53727 +++ linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
53728 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
53729 /* Comment? -Hans */
53730 wait_queue_head_t s_wait;
53731 /* To be obsoleted soon by per buffer seals.. -Hans */
53732 - atomic_t s_generation_counter; // increased by one every time the
53733 + atomic_unchecked_t s_generation_counter; // increased by one every time the
53734 // tree gets re-balanced
53735 unsigned long s_properties; /* File system properties. Currently holds
53736 on-disk FS format */
53737 diff -urNp linux-3.0.3/include/linux/relay.h linux-3.0.3/include/linux/relay.h
53738 --- linux-3.0.3/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
53739 +++ linux-3.0.3/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
53740 @@ -159,7 +159,7 @@ struct rchan_callbacks
53741 * The callback should return 0 if successful, negative if not.
53742 */
53743 int (*remove_buf_file)(struct dentry *dentry);
53744 -};
53745 +} __no_const;
53746
53747 /*
53748 * CONFIG_RELAY kernel API, kernel/relay.c
53749 diff -urNp linux-3.0.3/include/linux/rfkill.h linux-3.0.3/include/linux/rfkill.h
53750 --- linux-3.0.3/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
53751 +++ linux-3.0.3/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
53752 @@ -147,6 +147,7 @@ struct rfkill_ops {
53753 void (*query)(struct rfkill *rfkill, void *data);
53754 int (*set_block)(void *data, bool blocked);
53755 };
53756 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
53757
53758 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
53759 /**
53760 diff -urNp linux-3.0.3/include/linux/rmap.h linux-3.0.3/include/linux/rmap.h
53761 --- linux-3.0.3/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
53762 +++ linux-3.0.3/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
53763 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
53764 void anon_vma_init(void); /* create anon_vma_cachep */
53765 int anon_vma_prepare(struct vm_area_struct *);
53766 void unlink_anon_vmas(struct vm_area_struct *);
53767 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
53768 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
53769 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
53770 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
53771 void __anon_vma_link(struct vm_area_struct *);
53772
53773 static inline void anon_vma_merge(struct vm_area_struct *vma,
53774 diff -urNp linux-3.0.3/include/linux/sched.h linux-3.0.3/include/linux/sched.h
53775 --- linux-3.0.3/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
53776 +++ linux-3.0.3/include/linux/sched.h 2011-08-23 21:48:14.000000000 -0400
53777 @@ -100,6 +100,7 @@ struct bio_list;
53778 struct fs_struct;
53779 struct perf_event_context;
53780 struct blk_plug;
53781 +struct linux_binprm;
53782
53783 /*
53784 * List of flags we want to share for kernel threads,
53785 @@ -380,10 +381,13 @@ struct user_namespace;
53786 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
53787
53788 extern int sysctl_max_map_count;
53789 +extern unsigned long sysctl_heap_stack_gap;
53790
53791 #include <linux/aio.h>
53792
53793 #ifdef CONFIG_MMU
53794 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
53795 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
53796 extern void arch_pick_mmap_layout(struct mm_struct *mm);
53797 extern unsigned long
53798 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
53799 @@ -629,6 +633,17 @@ struct signal_struct {
53800 #ifdef CONFIG_TASKSTATS
53801 struct taskstats *stats;
53802 #endif
53803 +
53804 +#ifdef CONFIG_GRKERNSEC
53805 + u32 curr_ip;
53806 + u32 saved_ip;
53807 + u32 gr_saddr;
53808 + u32 gr_daddr;
53809 + u16 gr_sport;
53810 + u16 gr_dport;
53811 + u8 used_accept:1;
53812 +#endif
53813 +
53814 #ifdef CONFIG_AUDIT
53815 unsigned audit_tty;
53816 struct tty_audit_buf *tty_audit_buf;
53817 @@ -710,6 +725,11 @@ struct user_struct {
53818 struct key *session_keyring; /* UID's default session keyring */
53819 #endif
53820
53821 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53822 + unsigned int banned;
53823 + unsigned long ban_expires;
53824 +#endif
53825 +
53826 /* Hash table maintenance information */
53827 struct hlist_node uidhash_node;
53828 uid_t uid;
53829 @@ -1340,8 +1360,8 @@ struct task_struct {
53830 struct list_head thread_group;
53831
53832 struct completion *vfork_done; /* for vfork() */
53833 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
53834 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
53835 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
53836 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
53837
53838 cputime_t utime, stime, utimescaled, stimescaled;
53839 cputime_t gtime;
53840 @@ -1357,13 +1377,6 @@ struct task_struct {
53841 struct task_cputime cputime_expires;
53842 struct list_head cpu_timers[3];
53843
53844 -/* process credentials */
53845 - const struct cred __rcu *real_cred; /* objective and real subjective task
53846 - * credentials (COW) */
53847 - const struct cred __rcu *cred; /* effective (overridable) subjective task
53848 - * credentials (COW) */
53849 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
53850 -
53851 char comm[TASK_COMM_LEN]; /* executable name excluding path
53852 - access with [gs]et_task_comm (which lock
53853 it with task_lock())
53854 @@ -1380,8 +1393,16 @@ struct task_struct {
53855 #endif
53856 /* CPU-specific state of this task */
53857 struct thread_struct thread;
53858 +/* thread_info moved to task_struct */
53859 +#ifdef CONFIG_X86
53860 + struct thread_info tinfo;
53861 +#endif
53862 /* filesystem information */
53863 struct fs_struct *fs;
53864 +
53865 + const struct cred __rcu *cred; /* effective (overridable) subjective task
53866 + * credentials (COW) */
53867 +
53868 /* open file information */
53869 struct files_struct *files;
53870 /* namespaces */
53871 @@ -1428,6 +1449,11 @@ struct task_struct {
53872 struct rt_mutex_waiter *pi_blocked_on;
53873 #endif
53874
53875 +/* process credentials */
53876 + const struct cred __rcu *real_cred; /* objective and real subjective task
53877 + * credentials (COW) */
53878 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
53879 +
53880 #ifdef CONFIG_DEBUG_MUTEXES
53881 /* mutex deadlock detection */
53882 struct mutex_waiter *blocked_on;
53883 @@ -1538,6 +1564,21 @@ struct task_struct {
53884 unsigned long default_timer_slack_ns;
53885
53886 struct list_head *scm_work_list;
53887 +
53888 +#ifdef CONFIG_GRKERNSEC
53889 + /* grsecurity */
53890 + struct dentry *gr_chroot_dentry;
53891 + struct acl_subject_label *acl;
53892 + struct acl_role_label *role;
53893 + struct file *exec_file;
53894 + u16 acl_role_id;
53895 + /* is this the task that authenticated to the special role */
53896 + u8 acl_sp_role;
53897 + u8 is_writable;
53898 + u8 brute;
53899 + u8 gr_is_chrooted;
53900 +#endif
53901 +
53902 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
53903 /* Index of current stored address in ret_stack */
53904 int curr_ret_stack;
53905 @@ -1572,6 +1613,57 @@ struct task_struct {
53906 #endif
53907 };
53908
53909 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
53910 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
53911 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
53912 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
53913 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
53914 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
53915 +
53916 +#ifdef CONFIG_PAX_SOFTMODE
53917 +extern int pax_softmode;
53918 +#endif
53919 +
53920 +extern int pax_check_flags(unsigned long *);
53921 +
53922 +/* if tsk != current then task_lock must be held on it */
53923 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53924 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
53925 +{
53926 + if (likely(tsk->mm))
53927 + return tsk->mm->pax_flags;
53928 + else
53929 + return 0UL;
53930 +}
53931 +
53932 +/* if tsk != current then task_lock must be held on it */
53933 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
53934 +{
53935 + if (likely(tsk->mm)) {
53936 + tsk->mm->pax_flags = flags;
53937 + return 0;
53938 + }
53939 + return -EINVAL;
53940 +}
53941 +#endif
53942 +
53943 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53944 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
53945 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
53946 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
53947 +#endif
53948 +
53949 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
53950 +extern void pax_report_insns(void *pc, void *sp);
53951 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
53952 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
53953 +
53954 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
53955 +extern void pax_track_stack(void);
53956 +#else
53957 +static inline void pax_track_stack(void) {}
53958 +#endif
53959 +
53960 /* Future-safe accessor for struct task_struct's cpus_allowed. */
53961 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
53962
53963 @@ -2056,7 +2148,9 @@ void yield(void);
53964 extern struct exec_domain default_exec_domain;
53965
53966 union thread_union {
53967 +#ifndef CONFIG_X86
53968 struct thread_info thread_info;
53969 +#endif
53970 unsigned long stack[THREAD_SIZE/sizeof(long)];
53971 };
53972
53973 @@ -2089,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
53974 */
53975
53976 extern struct task_struct *find_task_by_vpid(pid_t nr);
53977 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
53978 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
53979 struct pid_namespace *ns);
53980
53981 @@ -2225,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
53982 extern void exit_itimers(struct signal_struct *);
53983 extern void flush_itimer_signals(void);
53984
53985 -extern NORET_TYPE void do_group_exit(int);
53986 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
53987
53988 extern void daemonize(const char *, ...);
53989 extern int allow_signal(int);
53990 @@ -2393,13 +2488,17 @@ static inline unsigned long *end_of_stac
53991
53992 #endif
53993
53994 -static inline int object_is_on_stack(void *obj)
53995 +static inline int object_starts_on_stack(void *obj)
53996 {
53997 - void *stack = task_stack_page(current);
53998 + const void *stack = task_stack_page(current);
53999
54000 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54001 }
54002
54003 +#ifdef CONFIG_PAX_USERCOPY
54004 +extern int object_is_on_stack(const void *obj, unsigned long len);
54005 +#endif
54006 +
54007 extern void thread_info_cache_init(void);
54008
54009 #ifdef CONFIG_DEBUG_STACK_USAGE
54010 diff -urNp linux-3.0.3/include/linux/screen_info.h linux-3.0.3/include/linux/screen_info.h
54011 --- linux-3.0.3/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54012 +++ linux-3.0.3/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54013 @@ -43,7 +43,8 @@ struct screen_info {
54014 __u16 pages; /* 0x32 */
54015 __u16 vesa_attributes; /* 0x34 */
54016 __u32 capabilities; /* 0x36 */
54017 - __u8 _reserved[6]; /* 0x3a */
54018 + __u16 vesapm_size; /* 0x3a */
54019 + __u8 _reserved[4]; /* 0x3c */
54020 } __attribute__((packed));
54021
54022 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54023 diff -urNp linux-3.0.3/include/linux/security.h linux-3.0.3/include/linux/security.h
54024 --- linux-3.0.3/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54025 +++ linux-3.0.3/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54026 @@ -36,6 +36,7 @@
54027 #include <linux/key.h>
54028 #include <linux/xfrm.h>
54029 #include <linux/slab.h>
54030 +#include <linux/grsecurity.h>
54031 #include <net/flow.h>
54032
54033 /* Maximum number of letters for an LSM name string */
54034 diff -urNp linux-3.0.3/include/linux/seq_file.h linux-3.0.3/include/linux/seq_file.h
54035 --- linux-3.0.3/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54036 +++ linux-3.0.3/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54037 @@ -32,6 +32,7 @@ struct seq_operations {
54038 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54039 int (*show) (struct seq_file *m, void *v);
54040 };
54041 +typedef struct seq_operations __no_const seq_operations_no_const;
54042
54043 #define SEQ_SKIP 1
54044
54045 diff -urNp linux-3.0.3/include/linux/shmem_fs.h linux-3.0.3/include/linux/shmem_fs.h
54046 --- linux-3.0.3/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54047 +++ linux-3.0.3/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54048 @@ -10,7 +10,7 @@
54049
54050 #define SHMEM_NR_DIRECT 16
54051
54052 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54053 +#define SHMEM_SYMLINK_INLINE_LEN 64
54054
54055 struct shmem_inode_info {
54056 spinlock_t lock;
54057 diff -urNp linux-3.0.3/include/linux/shm.h linux-3.0.3/include/linux/shm.h
54058 --- linux-3.0.3/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54059 +++ linux-3.0.3/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54060 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54061 pid_t shm_cprid;
54062 pid_t shm_lprid;
54063 struct user_struct *mlock_user;
54064 +#ifdef CONFIG_GRKERNSEC
54065 + time_t shm_createtime;
54066 + pid_t shm_lapid;
54067 +#endif
54068 };
54069
54070 /* shm_mode upper byte flags */
54071 diff -urNp linux-3.0.3/include/linux/skbuff.h linux-3.0.3/include/linux/skbuff.h
54072 --- linux-3.0.3/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54073 +++ linux-3.0.3/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54074 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54075 */
54076 static inline int skb_queue_empty(const struct sk_buff_head *list)
54077 {
54078 - return list->next == (struct sk_buff *)list;
54079 + return list->next == (const struct sk_buff *)list;
54080 }
54081
54082 /**
54083 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54084 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54085 const struct sk_buff *skb)
54086 {
54087 - return skb->next == (struct sk_buff *)list;
54088 + return skb->next == (const struct sk_buff *)list;
54089 }
54090
54091 /**
54092 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54093 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54094 const struct sk_buff *skb)
54095 {
54096 - return skb->prev == (struct sk_buff *)list;
54097 + return skb->prev == (const struct sk_buff *)list;
54098 }
54099
54100 /**
54101 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54102 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54103 */
54104 #ifndef NET_SKB_PAD
54105 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54106 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54107 #endif
54108
54109 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54110 diff -urNp linux-3.0.3/include/linux/slab_def.h linux-3.0.3/include/linux/slab_def.h
54111 --- linux-3.0.3/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54112 +++ linux-3.0.3/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54113 @@ -96,10 +96,10 @@ struct kmem_cache {
54114 unsigned long node_allocs;
54115 unsigned long node_frees;
54116 unsigned long node_overflow;
54117 - atomic_t allochit;
54118 - atomic_t allocmiss;
54119 - atomic_t freehit;
54120 - atomic_t freemiss;
54121 + atomic_unchecked_t allochit;
54122 + atomic_unchecked_t allocmiss;
54123 + atomic_unchecked_t freehit;
54124 + atomic_unchecked_t freemiss;
54125
54126 /*
54127 * If debugging is enabled, then the allocator can add additional
54128 diff -urNp linux-3.0.3/include/linux/slab.h linux-3.0.3/include/linux/slab.h
54129 --- linux-3.0.3/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54130 +++ linux-3.0.3/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54131 @@ -11,12 +11,20 @@
54132
54133 #include <linux/gfp.h>
54134 #include <linux/types.h>
54135 +#include <linux/err.h>
54136
54137 /*
54138 * Flags to pass to kmem_cache_create().
54139 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54140 */
54141 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54142 +
54143 +#ifdef CONFIG_PAX_USERCOPY
54144 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54145 +#else
54146 +#define SLAB_USERCOPY 0x00000000UL
54147 +#endif
54148 +
54149 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54150 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54151 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54152 @@ -87,10 +95,13 @@
54153 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54154 * Both make kfree a no-op.
54155 */
54156 -#define ZERO_SIZE_PTR ((void *)16)
54157 +#define ZERO_SIZE_PTR \
54158 +({ \
54159 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54160 + (void *)(-MAX_ERRNO-1L); \
54161 +})
54162
54163 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54164 - (unsigned long)ZERO_SIZE_PTR)
54165 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54166
54167 /*
54168 * struct kmem_cache related prototypes
54169 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54170 void kfree(const void *);
54171 void kzfree(const void *);
54172 size_t ksize(const void *);
54173 +void check_object_size(const void *ptr, unsigned long n, bool to);
54174
54175 /*
54176 * Allocator specific definitions. These are mainly used to establish optimized
54177 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54178
54179 void __init kmem_cache_init_late(void);
54180
54181 +#define kmalloc(x, y) \
54182 +({ \
54183 + void *___retval; \
54184 + intoverflow_t ___x = (intoverflow_t)x; \
54185 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54186 + ___retval = NULL; \
54187 + else \
54188 + ___retval = kmalloc((size_t)___x, (y)); \
54189 + ___retval; \
54190 +})
54191 +
54192 +#define kmalloc_node(x, y, z) \
54193 +({ \
54194 + void *___retval; \
54195 + intoverflow_t ___x = (intoverflow_t)x; \
54196 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54197 + ___retval = NULL; \
54198 + else \
54199 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54200 + ___retval; \
54201 +})
54202 +
54203 +#define kzalloc(x, y) \
54204 +({ \
54205 + void *___retval; \
54206 + intoverflow_t ___x = (intoverflow_t)x; \
54207 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54208 + ___retval = NULL; \
54209 + else \
54210 + ___retval = kzalloc((size_t)___x, (y)); \
54211 + ___retval; \
54212 +})
54213 +
54214 +#define __krealloc(x, y, z) \
54215 +({ \
54216 + void *___retval; \
54217 + intoverflow_t ___y = (intoverflow_t)y; \
54218 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54219 + ___retval = NULL; \
54220 + else \
54221 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54222 + ___retval; \
54223 +})
54224 +
54225 +#define krealloc(x, y, z) \
54226 +({ \
54227 + void *___retval; \
54228 + intoverflow_t ___y = (intoverflow_t)y; \
54229 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54230 + ___retval = NULL; \
54231 + else \
54232 + ___retval = krealloc((x), (size_t)___y, (z)); \
54233 + ___retval; \
54234 +})
54235 +
54236 #endif /* _LINUX_SLAB_H */
54237 diff -urNp linux-3.0.3/include/linux/slub_def.h linux-3.0.3/include/linux/slub_def.h
54238 --- linux-3.0.3/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54239 +++ linux-3.0.3/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54240 @@ -82,7 +82,7 @@ struct kmem_cache {
54241 struct kmem_cache_order_objects max;
54242 struct kmem_cache_order_objects min;
54243 gfp_t allocflags; /* gfp flags to use on each alloc */
54244 - int refcount; /* Refcount for slab cache destroy */
54245 + atomic_t refcount; /* Refcount for slab cache destroy */
54246 void (*ctor)(void *);
54247 int inuse; /* Offset to metadata */
54248 int align; /* Alignment */
54249 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54250 }
54251
54252 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54253 -void *__kmalloc(size_t size, gfp_t flags);
54254 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54255
54256 static __always_inline void *
54257 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54258 diff -urNp linux-3.0.3/include/linux/sonet.h linux-3.0.3/include/linux/sonet.h
54259 --- linux-3.0.3/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54260 +++ linux-3.0.3/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54261 @@ -61,7 +61,7 @@ struct sonet_stats {
54262 #include <asm/atomic.h>
54263
54264 struct k_sonet_stats {
54265 -#define __HANDLE_ITEM(i) atomic_t i
54266 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54267 __SONET_ITEMS
54268 #undef __HANDLE_ITEM
54269 };
54270 diff -urNp linux-3.0.3/include/linux/sunrpc/clnt.h linux-3.0.3/include/linux/sunrpc/clnt.h
54271 --- linux-3.0.3/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54272 +++ linux-3.0.3/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54273 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54274 {
54275 switch (sap->sa_family) {
54276 case AF_INET:
54277 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54278 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54279 case AF_INET6:
54280 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54281 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54282 }
54283 return 0;
54284 }
54285 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54286 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54287 const struct sockaddr *src)
54288 {
54289 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54290 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54291 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54292
54293 dsin->sin_family = ssin->sin_family;
54294 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54295 if (sa->sa_family != AF_INET6)
54296 return 0;
54297
54298 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54299 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54300 }
54301
54302 #endif /* __KERNEL__ */
54303 diff -urNp linux-3.0.3/include/linux/sunrpc/svc_rdma.h linux-3.0.3/include/linux/sunrpc/svc_rdma.h
54304 --- linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54305 +++ linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54306 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54307 extern unsigned int svcrdma_max_requests;
54308 extern unsigned int svcrdma_max_req_size;
54309
54310 -extern atomic_t rdma_stat_recv;
54311 -extern atomic_t rdma_stat_read;
54312 -extern atomic_t rdma_stat_write;
54313 -extern atomic_t rdma_stat_sq_starve;
54314 -extern atomic_t rdma_stat_rq_starve;
54315 -extern atomic_t rdma_stat_rq_poll;
54316 -extern atomic_t rdma_stat_rq_prod;
54317 -extern atomic_t rdma_stat_sq_poll;
54318 -extern atomic_t rdma_stat_sq_prod;
54319 +extern atomic_unchecked_t rdma_stat_recv;
54320 +extern atomic_unchecked_t rdma_stat_read;
54321 +extern atomic_unchecked_t rdma_stat_write;
54322 +extern atomic_unchecked_t rdma_stat_sq_starve;
54323 +extern atomic_unchecked_t rdma_stat_rq_starve;
54324 +extern atomic_unchecked_t rdma_stat_rq_poll;
54325 +extern atomic_unchecked_t rdma_stat_rq_prod;
54326 +extern atomic_unchecked_t rdma_stat_sq_poll;
54327 +extern atomic_unchecked_t rdma_stat_sq_prod;
54328
54329 #define RPCRDMA_VERSION 1
54330
54331 diff -urNp linux-3.0.3/include/linux/sysctl.h linux-3.0.3/include/linux/sysctl.h
54332 --- linux-3.0.3/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54333 +++ linux-3.0.3/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54334 @@ -155,7 +155,11 @@ enum
54335 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54336 };
54337
54338 -
54339 +#ifdef CONFIG_PAX_SOFTMODE
54340 +enum {
54341 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54342 +};
54343 +#endif
54344
54345 /* CTL_VM names: */
54346 enum
54347 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54348
54349 extern int proc_dostring(struct ctl_table *, int,
54350 void __user *, size_t *, loff_t *);
54351 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54352 + void __user *, size_t *, loff_t *);
54353 extern int proc_dointvec(struct ctl_table *, int,
54354 void __user *, size_t *, loff_t *);
54355 extern int proc_dointvec_minmax(struct ctl_table *, int,
54356 diff -urNp linux-3.0.3/include/linux/tty_ldisc.h linux-3.0.3/include/linux/tty_ldisc.h
54357 --- linux-3.0.3/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54358 +++ linux-3.0.3/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54359 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54360
54361 struct module *owner;
54362
54363 - int refcount;
54364 + atomic_t refcount;
54365 };
54366
54367 struct tty_ldisc {
54368 diff -urNp linux-3.0.3/include/linux/types.h linux-3.0.3/include/linux/types.h
54369 --- linux-3.0.3/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54370 +++ linux-3.0.3/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54371 @@ -213,10 +213,26 @@ typedef struct {
54372 int counter;
54373 } atomic_t;
54374
54375 +#ifdef CONFIG_PAX_REFCOUNT
54376 +typedef struct {
54377 + int counter;
54378 +} atomic_unchecked_t;
54379 +#else
54380 +typedef atomic_t atomic_unchecked_t;
54381 +#endif
54382 +
54383 #ifdef CONFIG_64BIT
54384 typedef struct {
54385 long counter;
54386 } atomic64_t;
54387 +
54388 +#ifdef CONFIG_PAX_REFCOUNT
54389 +typedef struct {
54390 + long counter;
54391 +} atomic64_unchecked_t;
54392 +#else
54393 +typedef atomic64_t atomic64_unchecked_t;
54394 +#endif
54395 #endif
54396
54397 struct list_head {
54398 diff -urNp linux-3.0.3/include/linux/uaccess.h linux-3.0.3/include/linux/uaccess.h
54399 --- linux-3.0.3/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54400 +++ linux-3.0.3/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54401 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54402 long ret; \
54403 mm_segment_t old_fs = get_fs(); \
54404 \
54405 - set_fs(KERNEL_DS); \
54406 pagefault_disable(); \
54407 + set_fs(KERNEL_DS); \
54408 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54409 - pagefault_enable(); \
54410 set_fs(old_fs); \
54411 + pagefault_enable(); \
54412 ret; \
54413 })
54414
54415 diff -urNp linux-3.0.3/include/linux/unaligned/access_ok.h linux-3.0.3/include/linux/unaligned/access_ok.h
54416 --- linux-3.0.3/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54417 +++ linux-3.0.3/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54418 @@ -6,32 +6,32 @@
54419
54420 static inline u16 get_unaligned_le16(const void *p)
54421 {
54422 - return le16_to_cpup((__le16 *)p);
54423 + return le16_to_cpup((const __le16 *)p);
54424 }
54425
54426 static inline u32 get_unaligned_le32(const void *p)
54427 {
54428 - return le32_to_cpup((__le32 *)p);
54429 + return le32_to_cpup((const __le32 *)p);
54430 }
54431
54432 static inline u64 get_unaligned_le64(const void *p)
54433 {
54434 - return le64_to_cpup((__le64 *)p);
54435 + return le64_to_cpup((const __le64 *)p);
54436 }
54437
54438 static inline u16 get_unaligned_be16(const void *p)
54439 {
54440 - return be16_to_cpup((__be16 *)p);
54441 + return be16_to_cpup((const __be16 *)p);
54442 }
54443
54444 static inline u32 get_unaligned_be32(const void *p)
54445 {
54446 - return be32_to_cpup((__be32 *)p);
54447 + return be32_to_cpup((const __be32 *)p);
54448 }
54449
54450 static inline u64 get_unaligned_be64(const void *p)
54451 {
54452 - return be64_to_cpup((__be64 *)p);
54453 + return be64_to_cpup((const __be64 *)p);
54454 }
54455
54456 static inline void put_unaligned_le16(u16 val, void *p)
54457 diff -urNp linux-3.0.3/include/linux/vmalloc.h linux-3.0.3/include/linux/vmalloc.h
54458 --- linux-3.0.3/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54459 +++ linux-3.0.3/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54460 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54461 #define VM_MAP 0x00000004 /* vmap()ed pages */
54462 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54463 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54464 +
54465 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54466 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54467 +#endif
54468 +
54469 /* bits [20..32] reserved for arch specific ioremap internals */
54470
54471 /*
54472 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54473 # endif
54474 #endif
54475
54476 +#define vmalloc(x) \
54477 +({ \
54478 + void *___retval; \
54479 + intoverflow_t ___x = (intoverflow_t)x; \
54480 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54481 + ___retval = NULL; \
54482 + else \
54483 + ___retval = vmalloc((unsigned long)___x); \
54484 + ___retval; \
54485 +})
54486 +
54487 +#define vzalloc(x) \
54488 +({ \
54489 + void *___retval; \
54490 + intoverflow_t ___x = (intoverflow_t)x; \
54491 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54492 + ___retval = NULL; \
54493 + else \
54494 + ___retval = vzalloc((unsigned long)___x); \
54495 + ___retval; \
54496 +})
54497 +
54498 +#define __vmalloc(x, y, z) \
54499 +({ \
54500 + void *___retval; \
54501 + intoverflow_t ___x = (intoverflow_t)x; \
54502 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54503 + ___retval = NULL; \
54504 + else \
54505 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54506 + ___retval; \
54507 +})
54508 +
54509 +#define vmalloc_user(x) \
54510 +({ \
54511 + void *___retval; \
54512 + intoverflow_t ___x = (intoverflow_t)x; \
54513 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54514 + ___retval = NULL; \
54515 + else \
54516 + ___retval = vmalloc_user((unsigned long)___x); \
54517 + ___retval; \
54518 +})
54519 +
54520 +#define vmalloc_exec(x) \
54521 +({ \
54522 + void *___retval; \
54523 + intoverflow_t ___x = (intoverflow_t)x; \
54524 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54525 + ___retval = NULL; \
54526 + else \
54527 + ___retval = vmalloc_exec((unsigned long)___x); \
54528 + ___retval; \
54529 +})
54530 +
54531 +#define vmalloc_node(x, y) \
54532 +({ \
54533 + void *___retval; \
54534 + intoverflow_t ___x = (intoverflow_t)x; \
54535 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54536 + ___retval = NULL; \
54537 + else \
54538 + ___retval = vmalloc_node((unsigned long)___x, (y));\
54539 + ___retval; \
54540 +})
54541 +
54542 +#define vzalloc_node(x, y) \
54543 +({ \
54544 + void *___retval; \
54545 + intoverflow_t ___x = (intoverflow_t)x; \
54546 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54547 + ___retval = NULL; \
54548 + else \
54549 + ___retval = vzalloc_node((unsigned long)___x, (y));\
54550 + ___retval; \
54551 +})
54552 +
54553 +#define vmalloc_32(x) \
54554 +({ \
54555 + void *___retval; \
54556 + intoverflow_t ___x = (intoverflow_t)x; \
54557 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54558 + ___retval = NULL; \
54559 + else \
54560 + ___retval = vmalloc_32((unsigned long)___x); \
54561 + ___retval; \
54562 +})
54563 +
54564 +#define vmalloc_32_user(x) \
54565 +({ \
54566 +void *___retval; \
54567 + intoverflow_t ___x = (intoverflow_t)x; \
54568 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54569 + ___retval = NULL; \
54570 + else \
54571 + ___retval = vmalloc_32_user((unsigned long)___x);\
54572 + ___retval; \
54573 +})
54574 +
54575 #endif /* _LINUX_VMALLOC_H */
54576 diff -urNp linux-3.0.3/include/linux/vmstat.h linux-3.0.3/include/linux/vmstat.h
54577 --- linux-3.0.3/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54578 +++ linux-3.0.3/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54579 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54580 /*
54581 * Zone based page accounting with per cpu differentials.
54582 */
54583 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54584 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54585
54586 static inline void zone_page_state_add(long x, struct zone *zone,
54587 enum zone_stat_item item)
54588 {
54589 - atomic_long_add(x, &zone->vm_stat[item]);
54590 - atomic_long_add(x, &vm_stat[item]);
54591 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54592 + atomic_long_add_unchecked(x, &vm_stat[item]);
54593 }
54594
54595 static inline unsigned long global_page_state(enum zone_stat_item item)
54596 {
54597 - long x = atomic_long_read(&vm_stat[item]);
54598 + long x = atomic_long_read_unchecked(&vm_stat[item]);
54599 #ifdef CONFIG_SMP
54600 if (x < 0)
54601 x = 0;
54602 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
54603 static inline unsigned long zone_page_state(struct zone *zone,
54604 enum zone_stat_item item)
54605 {
54606 - long x = atomic_long_read(&zone->vm_stat[item]);
54607 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54608 #ifdef CONFIG_SMP
54609 if (x < 0)
54610 x = 0;
54611 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54612 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54613 enum zone_stat_item item)
54614 {
54615 - long x = atomic_long_read(&zone->vm_stat[item]);
54616 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54617
54618 #ifdef CONFIG_SMP
54619 int cpu;
54620 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54621
54622 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54623 {
54624 - atomic_long_inc(&zone->vm_stat[item]);
54625 - atomic_long_inc(&vm_stat[item]);
54626 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
54627 + atomic_long_inc_unchecked(&vm_stat[item]);
54628 }
54629
54630 static inline void __inc_zone_page_state(struct page *page,
54631 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54632
54633 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54634 {
54635 - atomic_long_dec(&zone->vm_stat[item]);
54636 - atomic_long_dec(&vm_stat[item]);
54637 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
54638 + atomic_long_dec_unchecked(&vm_stat[item]);
54639 }
54640
54641 static inline void __dec_zone_page_state(struct page *page,
54642 diff -urNp linux-3.0.3/include/net/caif/cfctrl.h linux-3.0.3/include/net/caif/cfctrl.h
54643 --- linux-3.0.3/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54644 +++ linux-3.0.3/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54645 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
54646 void (*radioset_rsp)(void);
54647 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54648 struct cflayer *client_layer);
54649 -};
54650 +} __no_const;
54651
54652 /* Link Setup Parameters for CAIF-Links. */
54653 struct cfctrl_link_param {
54654 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
54655 struct cfctrl {
54656 struct cfsrvl serv;
54657 struct cfctrl_rsp res;
54658 - atomic_t req_seq_no;
54659 - atomic_t rsp_seq_no;
54660 + atomic_unchecked_t req_seq_no;
54661 + atomic_unchecked_t rsp_seq_no;
54662 struct list_head list;
54663 /* Protects from simultaneous access to first_req list */
54664 spinlock_t info_list_lock;
54665 diff -urNp linux-3.0.3/include/net/flow.h linux-3.0.3/include/net/flow.h
54666 --- linux-3.0.3/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54667 +++ linux-3.0.3/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54668 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54669 u8 dir, flow_resolve_t resolver, void *ctx);
54670
54671 extern void flow_cache_flush(void);
54672 -extern atomic_t flow_cache_genid;
54673 +extern atomic_unchecked_t flow_cache_genid;
54674
54675 #endif
54676 diff -urNp linux-3.0.3/include/net/inetpeer.h linux-3.0.3/include/net/inetpeer.h
54677 --- linux-3.0.3/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54678 +++ linux-3.0.3/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54679 @@ -43,8 +43,8 @@ struct inet_peer {
54680 */
54681 union {
54682 struct {
54683 - atomic_t rid; /* Frag reception counter */
54684 - atomic_t ip_id_count; /* IP ID for the next packet */
54685 + atomic_unchecked_t rid; /* Frag reception counter */
54686 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
54687 __u32 tcp_ts;
54688 __u32 tcp_ts_stamp;
54689 u32 metrics[RTAX_MAX];
54690 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
54691 {
54692 more++;
54693 inet_peer_refcheck(p);
54694 - return atomic_add_return(more, &p->ip_id_count) - more;
54695 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
54696 }
54697
54698 #endif /* _NET_INETPEER_H */
54699 diff -urNp linux-3.0.3/include/net/ip_fib.h linux-3.0.3/include/net/ip_fib.h
54700 --- linux-3.0.3/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
54701 +++ linux-3.0.3/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
54702 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
54703
54704 #define FIB_RES_SADDR(net, res) \
54705 ((FIB_RES_NH(res).nh_saddr_genid == \
54706 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
54707 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
54708 FIB_RES_NH(res).nh_saddr : \
54709 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
54710 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
54711 diff -urNp linux-3.0.3/include/net/ip_vs.h linux-3.0.3/include/net/ip_vs.h
54712 --- linux-3.0.3/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
54713 +++ linux-3.0.3/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
54714 @@ -509,7 +509,7 @@ struct ip_vs_conn {
54715 struct ip_vs_conn *control; /* Master control connection */
54716 atomic_t n_control; /* Number of controlled ones */
54717 struct ip_vs_dest *dest; /* real server */
54718 - atomic_t in_pkts; /* incoming packet counter */
54719 + atomic_unchecked_t in_pkts; /* incoming packet counter */
54720
54721 /* packet transmitter for different forwarding methods. If it
54722 mangles the packet, it must return NF_DROP or better NF_STOLEN,
54723 @@ -647,7 +647,7 @@ struct ip_vs_dest {
54724 __be16 port; /* port number of the server */
54725 union nf_inet_addr addr; /* IP address of the server */
54726 volatile unsigned flags; /* dest status flags */
54727 - atomic_t conn_flags; /* flags to copy to conn */
54728 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
54729 atomic_t weight; /* server weight */
54730
54731 atomic_t refcnt; /* reference counter */
54732 diff -urNp linux-3.0.3/include/net/irda/ircomm_core.h linux-3.0.3/include/net/irda/ircomm_core.h
54733 --- linux-3.0.3/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
54734 +++ linux-3.0.3/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
54735 @@ -51,7 +51,7 @@ typedef struct {
54736 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
54737 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
54738 struct ircomm_info *);
54739 -} call_t;
54740 +} __no_const call_t;
54741
54742 struct ircomm_cb {
54743 irda_queue_t queue;
54744 diff -urNp linux-3.0.3/include/net/irda/ircomm_tty.h linux-3.0.3/include/net/irda/ircomm_tty.h
54745 --- linux-3.0.3/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
54746 +++ linux-3.0.3/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
54747 @@ -35,6 +35,7 @@
54748 #include <linux/termios.h>
54749 #include <linux/timer.h>
54750 #include <linux/tty.h> /* struct tty_struct */
54751 +#include <asm/local.h>
54752
54753 #include <net/irda/irias_object.h>
54754 #include <net/irda/ircomm_core.h>
54755 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
54756 unsigned short close_delay;
54757 unsigned short closing_wait; /* time to wait before closing */
54758
54759 - int open_count;
54760 - int blocked_open; /* # of blocked opens */
54761 + local_t open_count;
54762 + local_t blocked_open; /* # of blocked opens */
54763
54764 /* Protect concurent access to :
54765 * o self->open_count
54766 diff -urNp linux-3.0.3/include/net/iucv/af_iucv.h linux-3.0.3/include/net/iucv/af_iucv.h
54767 --- linux-3.0.3/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
54768 +++ linux-3.0.3/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
54769 @@ -87,7 +87,7 @@ struct iucv_sock {
54770 struct iucv_sock_list {
54771 struct hlist_head head;
54772 rwlock_t lock;
54773 - atomic_t autobind_name;
54774 + atomic_unchecked_t autobind_name;
54775 };
54776
54777 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
54778 diff -urNp linux-3.0.3/include/net/lapb.h linux-3.0.3/include/net/lapb.h
54779 --- linux-3.0.3/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
54780 +++ linux-3.0.3/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
54781 @@ -95,7 +95,7 @@ struct lapb_cb {
54782 struct sk_buff_head write_queue;
54783 struct sk_buff_head ack_queue;
54784 unsigned char window;
54785 - struct lapb_register_struct callbacks;
54786 + struct lapb_register_struct *callbacks;
54787
54788 /* FRMR control information */
54789 struct lapb_frame frmr_data;
54790 diff -urNp linux-3.0.3/include/net/neighbour.h linux-3.0.3/include/net/neighbour.h
54791 --- linux-3.0.3/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
54792 +++ linux-3.0.3/include/net/neighbour.h 2011-08-23 21:47:56.000000000 -0400
54793 @@ -117,7 +117,7 @@ struct neighbour {
54794 };
54795
54796 struct neigh_ops {
54797 - int family;
54798 + const int family;
54799 void (*solicit)(struct neighbour *, struct sk_buff*);
54800 void (*error_report)(struct neighbour *, struct sk_buff*);
54801 int (*output)(struct sk_buff*);
54802 diff -urNp linux-3.0.3/include/net/netlink.h linux-3.0.3/include/net/netlink.h
54803 --- linux-3.0.3/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
54804 +++ linux-3.0.3/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
54805 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
54806 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
54807 {
54808 if (mark)
54809 - skb_trim(skb, (unsigned char *) mark - skb->data);
54810 + skb_trim(skb, (const unsigned char *) mark - skb->data);
54811 }
54812
54813 /**
54814 diff -urNp linux-3.0.3/include/net/netns/ipv4.h linux-3.0.3/include/net/netns/ipv4.h
54815 --- linux-3.0.3/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
54816 +++ linux-3.0.3/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
54817 @@ -56,8 +56,8 @@ struct netns_ipv4 {
54818
54819 unsigned int sysctl_ping_group_range[2];
54820
54821 - atomic_t rt_genid;
54822 - atomic_t dev_addr_genid;
54823 + atomic_unchecked_t rt_genid;
54824 + atomic_unchecked_t dev_addr_genid;
54825
54826 #ifdef CONFIG_IP_MROUTE
54827 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
54828 diff -urNp linux-3.0.3/include/net/sctp/sctp.h linux-3.0.3/include/net/sctp/sctp.h
54829 --- linux-3.0.3/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
54830 +++ linux-3.0.3/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
54831 @@ -315,9 +315,9 @@ do { \
54832
54833 #else /* SCTP_DEBUG */
54834
54835 -#define SCTP_DEBUG_PRINTK(whatever...)
54836 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
54837 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
54838 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
54839 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
54840 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
54841 #define SCTP_ENABLE_DEBUG
54842 #define SCTP_DISABLE_DEBUG
54843 #define SCTP_ASSERT(expr, str, func)
54844 diff -urNp linux-3.0.3/include/net/sock.h linux-3.0.3/include/net/sock.h
54845 --- linux-3.0.3/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
54846 +++ linux-3.0.3/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
54847 @@ -277,7 +277,7 @@ struct sock {
54848 #ifdef CONFIG_RPS
54849 __u32 sk_rxhash;
54850 #endif
54851 - atomic_t sk_drops;
54852 + atomic_unchecked_t sk_drops;
54853 int sk_rcvbuf;
54854
54855 struct sk_filter __rcu *sk_filter;
54856 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
54857 }
54858
54859 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
54860 - char __user *from, char *to,
54861 + char __user *from, unsigned char *to,
54862 int copy, int offset)
54863 {
54864 if (skb->ip_summed == CHECKSUM_NONE) {
54865 diff -urNp linux-3.0.3/include/net/tcp.h linux-3.0.3/include/net/tcp.h
54866 --- linux-3.0.3/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
54867 +++ linux-3.0.3/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
54868 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
54869 struct tcp_seq_afinfo {
54870 char *name;
54871 sa_family_t family;
54872 - struct file_operations seq_fops;
54873 - struct seq_operations seq_ops;
54874 + file_operations_no_const seq_fops;
54875 + seq_operations_no_const seq_ops;
54876 };
54877
54878 struct tcp_iter_state {
54879 diff -urNp linux-3.0.3/include/net/udp.h linux-3.0.3/include/net/udp.h
54880 --- linux-3.0.3/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
54881 +++ linux-3.0.3/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
54882 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
54883 char *name;
54884 sa_family_t family;
54885 struct udp_table *udp_table;
54886 - struct file_operations seq_fops;
54887 - struct seq_operations seq_ops;
54888 + file_operations_no_const seq_fops;
54889 + seq_operations_no_const seq_ops;
54890 };
54891
54892 struct udp_iter_state {
54893 diff -urNp linux-3.0.3/include/net/xfrm.h linux-3.0.3/include/net/xfrm.h
54894 --- linux-3.0.3/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
54895 +++ linux-3.0.3/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
54896 @@ -505,7 +505,7 @@ struct xfrm_policy {
54897 struct timer_list timer;
54898
54899 struct flow_cache_object flo;
54900 - atomic_t genid;
54901 + atomic_unchecked_t genid;
54902 u32 priority;
54903 u32 index;
54904 struct xfrm_mark mark;
54905 diff -urNp linux-3.0.3/include/rdma/iw_cm.h linux-3.0.3/include/rdma/iw_cm.h
54906 --- linux-3.0.3/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
54907 +++ linux-3.0.3/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
54908 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
54909 int backlog);
54910
54911 int (*destroy_listen)(struct iw_cm_id *cm_id);
54912 -};
54913 +} __no_const;
54914
54915 /**
54916 * iw_create_cm_id - Create an IW CM identifier.
54917 diff -urNp linux-3.0.3/include/scsi/libfc.h linux-3.0.3/include/scsi/libfc.h
54918 --- linux-3.0.3/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
54919 +++ linux-3.0.3/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
54920 @@ -750,6 +750,7 @@ struct libfc_function_template {
54921 */
54922 void (*disc_stop_final) (struct fc_lport *);
54923 };
54924 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
54925
54926 /**
54927 * struct fc_disc - Discovery context
54928 @@ -853,7 +854,7 @@ struct fc_lport {
54929 struct fc_vport *vport;
54930
54931 /* Operational Information */
54932 - struct libfc_function_template tt;
54933 + libfc_function_template_no_const tt;
54934 u8 link_up;
54935 u8 qfull;
54936 enum fc_lport_state state;
54937 diff -urNp linux-3.0.3/include/scsi/scsi_device.h linux-3.0.3/include/scsi/scsi_device.h
54938 --- linux-3.0.3/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
54939 +++ linux-3.0.3/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
54940 @@ -161,9 +161,9 @@ struct scsi_device {
54941 unsigned int max_device_blocked; /* what device_blocked counts down from */
54942 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
54943
54944 - atomic_t iorequest_cnt;
54945 - atomic_t iodone_cnt;
54946 - atomic_t ioerr_cnt;
54947 + atomic_unchecked_t iorequest_cnt;
54948 + atomic_unchecked_t iodone_cnt;
54949 + atomic_unchecked_t ioerr_cnt;
54950
54951 struct device sdev_gendev,
54952 sdev_dev;
54953 diff -urNp linux-3.0.3/include/scsi/scsi_transport_fc.h linux-3.0.3/include/scsi/scsi_transport_fc.h
54954 --- linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
54955 +++ linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-08-23 21:47:56.000000000 -0400
54956 @@ -666,9 +666,9 @@ struct fc_function_template {
54957 int (*bsg_timeout)(struct fc_bsg_job *);
54958
54959 /* allocation lengths for host-specific data */
54960 - u32 dd_fcrport_size;
54961 - u32 dd_fcvport_size;
54962 - u32 dd_bsg_size;
54963 + const u32 dd_fcrport_size;
54964 + const u32 dd_fcvport_size;
54965 + const u32 dd_bsg_size;
54966
54967 /*
54968 * The driver sets these to tell the transport class it
54969 @@ -678,39 +678,39 @@ struct fc_function_template {
54970 */
54971
54972 /* remote port fixed attributes */
54973 - unsigned long show_rport_maxframe_size:1;
54974 - unsigned long show_rport_supported_classes:1;
54975 - unsigned long show_rport_dev_loss_tmo:1;
54976 + const unsigned long show_rport_maxframe_size:1;
54977 + const unsigned long show_rport_supported_classes:1;
54978 + const unsigned long show_rport_dev_loss_tmo:1;
54979
54980 /*
54981 * target dynamic attributes
54982 * These should all be "1" if the driver uses the remote port
54983 * add/delete functions (so attributes reflect rport values).
54984 */
54985 - unsigned long show_starget_node_name:1;
54986 - unsigned long show_starget_port_name:1;
54987 - unsigned long show_starget_port_id:1;
54988 + const unsigned long show_starget_node_name:1;
54989 + const unsigned long show_starget_port_name:1;
54990 + const unsigned long show_starget_port_id:1;
54991
54992 /* host fixed attributes */
54993 - unsigned long show_host_node_name:1;
54994 - unsigned long show_host_port_name:1;
54995 - unsigned long show_host_permanent_port_name:1;
54996 - unsigned long show_host_supported_classes:1;
54997 - unsigned long show_host_supported_fc4s:1;
54998 - unsigned long show_host_supported_speeds:1;
54999 - unsigned long show_host_maxframe_size:1;
55000 - unsigned long show_host_serial_number:1;
55001 + const unsigned long show_host_node_name:1;
55002 + const unsigned long show_host_port_name:1;
55003 + const unsigned long show_host_permanent_port_name:1;
55004 + const unsigned long show_host_supported_classes:1;
55005 + const unsigned long show_host_supported_fc4s:1;
55006 + const unsigned long show_host_supported_speeds:1;
55007 + const unsigned long show_host_maxframe_size:1;
55008 + const unsigned long show_host_serial_number:1;
55009 /* host dynamic attributes */
55010 - unsigned long show_host_port_id:1;
55011 - unsigned long show_host_port_type:1;
55012 - unsigned long show_host_port_state:1;
55013 - unsigned long show_host_active_fc4s:1;
55014 - unsigned long show_host_speed:1;
55015 - unsigned long show_host_fabric_name:1;
55016 - unsigned long show_host_symbolic_name:1;
55017 - unsigned long show_host_system_hostname:1;
55018 + const unsigned long show_host_port_id:1;
55019 + const unsigned long show_host_port_type:1;
55020 + const unsigned long show_host_port_state:1;
55021 + const unsigned long show_host_active_fc4s:1;
55022 + const unsigned long show_host_speed:1;
55023 + const unsigned long show_host_fabric_name:1;
55024 + const unsigned long show_host_symbolic_name:1;
55025 + const unsigned long show_host_system_hostname:1;
55026
55027 - unsigned long disable_target_scan:1;
55028 + const unsigned long disable_target_scan:1;
55029 };
55030
55031
55032 diff -urNp linux-3.0.3/include/sound/ak4xxx-adda.h linux-3.0.3/include/sound/ak4xxx-adda.h
55033 --- linux-3.0.3/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55034 +++ linux-3.0.3/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55035 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55036 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55037 unsigned char val);
55038 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55039 -};
55040 +} __no_const;
55041
55042 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55043
55044 diff -urNp linux-3.0.3/include/sound/hwdep.h linux-3.0.3/include/sound/hwdep.h
55045 --- linux-3.0.3/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55046 +++ linux-3.0.3/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55047 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55048 struct snd_hwdep_dsp_status *status);
55049 int (*dsp_load)(struct snd_hwdep *hw,
55050 struct snd_hwdep_dsp_image *image);
55051 -};
55052 +} __no_const;
55053
55054 struct snd_hwdep {
55055 struct snd_card *card;
55056 diff -urNp linux-3.0.3/include/sound/info.h linux-3.0.3/include/sound/info.h
55057 --- linux-3.0.3/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55058 +++ linux-3.0.3/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55059 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55060 struct snd_info_buffer *buffer);
55061 void (*write)(struct snd_info_entry *entry,
55062 struct snd_info_buffer *buffer);
55063 -};
55064 +} __no_const;
55065
55066 struct snd_info_entry_ops {
55067 int (*open)(struct snd_info_entry *entry,
55068 diff -urNp linux-3.0.3/include/sound/pcm.h linux-3.0.3/include/sound/pcm.h
55069 --- linux-3.0.3/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55070 +++ linux-3.0.3/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55071 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55072 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55073 int (*ack)(struct snd_pcm_substream *substream);
55074 };
55075 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55076
55077 /*
55078 *
55079 diff -urNp linux-3.0.3/include/sound/sb16_csp.h linux-3.0.3/include/sound/sb16_csp.h
55080 --- linux-3.0.3/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55081 +++ linux-3.0.3/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55082 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55083 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55084 int (*csp_stop) (struct snd_sb_csp * p);
55085 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55086 -};
55087 +} __no_const;
55088
55089 /*
55090 * CSP private data
55091 diff -urNp linux-3.0.3/include/sound/soc.h linux-3.0.3/include/sound/soc.h
55092 --- linux-3.0.3/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55093 +++ linux-3.0.3/include/sound/soc.h 2011-08-23 21:47:56.000000000 -0400
55094 @@ -635,7 +635,7 @@ struct snd_soc_platform_driver {
55095 struct snd_soc_dai *);
55096
55097 /* platform stream ops */
55098 - struct snd_pcm_ops *ops;
55099 + struct snd_pcm_ops * const ops;
55100 };
55101
55102 struct snd_soc_platform {
55103 diff -urNp linux-3.0.3/include/sound/ymfpci.h linux-3.0.3/include/sound/ymfpci.h
55104 --- linux-3.0.3/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55105 +++ linux-3.0.3/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55106 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55107 spinlock_t reg_lock;
55108 spinlock_t voice_lock;
55109 wait_queue_head_t interrupt_sleep;
55110 - atomic_t interrupt_sleep_count;
55111 + atomic_unchecked_t interrupt_sleep_count;
55112 struct snd_info_entry *proc_entry;
55113 const struct firmware *dsp_microcode;
55114 const struct firmware *controller_microcode;
55115 diff -urNp linux-3.0.3/include/target/target_core_base.h linux-3.0.3/include/target/target_core_base.h
55116 --- linux-3.0.3/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55117 +++ linux-3.0.3/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55118 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55119 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55120 int (*t10_pr_register)(struct se_cmd *);
55121 int (*t10_pr_clear)(struct se_cmd *);
55122 -};
55123 +} __no_const;
55124
55125 struct t10_reservation_template {
55126 /* Reservation effects all target ports */
55127 @@ -432,8 +432,8 @@ struct se_transport_task {
55128 atomic_t t_task_cdbs_left;
55129 atomic_t t_task_cdbs_ex_left;
55130 atomic_t t_task_cdbs_timeout_left;
55131 - atomic_t t_task_cdbs_sent;
55132 - atomic_t t_transport_aborted;
55133 + atomic_unchecked_t t_task_cdbs_sent;
55134 + atomic_unchecked_t t_transport_aborted;
55135 atomic_t t_transport_active;
55136 atomic_t t_transport_complete;
55137 atomic_t t_transport_queue_active;
55138 @@ -774,7 +774,7 @@ struct se_device {
55139 atomic_t active_cmds;
55140 atomic_t simple_cmds;
55141 atomic_t depth_left;
55142 - atomic_t dev_ordered_id;
55143 + atomic_unchecked_t dev_ordered_id;
55144 atomic_t dev_tur_active;
55145 atomic_t execute_tasks;
55146 atomic_t dev_status_thr_count;
55147 diff -urNp linux-3.0.3/include/trace/events/irq.h linux-3.0.3/include/trace/events/irq.h
55148 --- linux-3.0.3/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55149 +++ linux-3.0.3/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55150 @@ -36,7 +36,7 @@ struct softirq_action;
55151 */
55152 TRACE_EVENT(irq_handler_entry,
55153
55154 - TP_PROTO(int irq, struct irqaction *action),
55155 + TP_PROTO(int irq, const struct irqaction *action),
55156
55157 TP_ARGS(irq, action),
55158
55159 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55160 */
55161 TRACE_EVENT(irq_handler_exit,
55162
55163 - TP_PROTO(int irq, struct irqaction *action, int ret),
55164 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55165
55166 TP_ARGS(irq, action, ret),
55167
55168 diff -urNp linux-3.0.3/include/video/udlfb.h linux-3.0.3/include/video/udlfb.h
55169 --- linux-3.0.3/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55170 +++ linux-3.0.3/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55171 @@ -51,10 +51,10 @@ struct dlfb_data {
55172 int base8;
55173 u32 pseudo_palette[256];
55174 /* blit-only rendering path metrics, exposed through sysfs */
55175 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55176 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55177 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55178 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55179 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55180 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55181 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55182 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55183 };
55184
55185 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55186 diff -urNp linux-3.0.3/include/video/uvesafb.h linux-3.0.3/include/video/uvesafb.h
55187 --- linux-3.0.3/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55188 +++ linux-3.0.3/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55189 @@ -177,6 +177,7 @@ struct uvesafb_par {
55190 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55191 u8 pmi_setpal; /* PMI for palette changes */
55192 u16 *pmi_base; /* protected mode interface location */
55193 + u8 *pmi_code; /* protected mode code location */
55194 void *pmi_start;
55195 void *pmi_pal;
55196 u8 *vbe_state_orig; /*
55197 diff -urNp linux-3.0.3/init/do_mounts.c linux-3.0.3/init/do_mounts.c
55198 --- linux-3.0.3/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55199 +++ linux-3.0.3/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55200 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55201
55202 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55203 {
55204 - int err = sys_mount(name, "/root", fs, flags, data);
55205 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55206 if (err)
55207 return err;
55208
55209 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55210 va_start(args, fmt);
55211 vsprintf(buf, fmt, args);
55212 va_end(args);
55213 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55214 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55215 if (fd >= 0) {
55216 sys_ioctl(fd, FDEJECT, 0);
55217 sys_close(fd);
55218 }
55219 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55220 - fd = sys_open("/dev/console", O_RDWR, 0);
55221 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55222 if (fd >= 0) {
55223 sys_ioctl(fd, TCGETS, (long)&termios);
55224 termios.c_lflag &= ~ICANON;
55225 sys_ioctl(fd, TCSETSF, (long)&termios);
55226 - sys_read(fd, &c, 1);
55227 + sys_read(fd, (char __user *)&c, 1);
55228 termios.c_lflag |= ICANON;
55229 sys_ioctl(fd, TCSETSF, (long)&termios);
55230 sys_close(fd);
55231 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55232 mount_root();
55233 out:
55234 devtmpfs_mount("dev");
55235 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55236 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55237 sys_chroot((const char __user __force *)".");
55238 }
55239 diff -urNp linux-3.0.3/init/do_mounts.h linux-3.0.3/init/do_mounts.h
55240 --- linux-3.0.3/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55241 +++ linux-3.0.3/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55242 @@ -15,15 +15,15 @@ extern int root_mountflags;
55243
55244 static inline int create_dev(char *name, dev_t dev)
55245 {
55246 - sys_unlink(name);
55247 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55248 + sys_unlink((__force char __user *)name);
55249 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55250 }
55251
55252 #if BITS_PER_LONG == 32
55253 static inline u32 bstat(char *name)
55254 {
55255 struct stat64 stat;
55256 - if (sys_stat64(name, &stat) != 0)
55257 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55258 return 0;
55259 if (!S_ISBLK(stat.st_mode))
55260 return 0;
55261 diff -urNp linux-3.0.3/init/do_mounts_initrd.c linux-3.0.3/init/do_mounts_initrd.c
55262 --- linux-3.0.3/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55263 +++ linux-3.0.3/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55264 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55265 create_dev("/dev/root.old", Root_RAM0);
55266 /* mount initrd on rootfs' /root */
55267 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55268 - sys_mkdir("/old", 0700);
55269 - root_fd = sys_open("/", 0, 0);
55270 - old_fd = sys_open("/old", 0, 0);
55271 + sys_mkdir((__force const char __user *)"/old", 0700);
55272 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55273 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55274 /* move initrd over / and chdir/chroot in initrd root */
55275 - sys_chdir("/root");
55276 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55277 - sys_chroot(".");
55278 + sys_chdir((__force const char __user *)"/root");
55279 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55280 + sys_chroot((__force const char __user *)".");
55281
55282 /*
55283 * In case that a resume from disk is carried out by linuxrc or one of
55284 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55285
55286 /* move initrd to rootfs' /old */
55287 sys_fchdir(old_fd);
55288 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55289 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55290 /* switch root and cwd back to / of rootfs */
55291 sys_fchdir(root_fd);
55292 - sys_chroot(".");
55293 + sys_chroot((__force const char __user *)".");
55294 sys_close(old_fd);
55295 sys_close(root_fd);
55296
55297 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55298 - sys_chdir("/old");
55299 + sys_chdir((__force const char __user *)"/old");
55300 return;
55301 }
55302
55303 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55304 mount_root();
55305
55306 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55307 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55308 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55309 if (!error)
55310 printk("okay\n");
55311 else {
55312 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55313 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55314 if (error == -ENOENT)
55315 printk("/initrd does not exist. Ignored.\n");
55316 else
55317 printk("failed\n");
55318 printk(KERN_NOTICE "Unmounting old root\n");
55319 - sys_umount("/old", MNT_DETACH);
55320 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55321 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55322 if (fd < 0) {
55323 error = fd;
55324 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55325 * mounted in the normal path.
55326 */
55327 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55328 - sys_unlink("/initrd.image");
55329 + sys_unlink((__force const char __user *)"/initrd.image");
55330 handle_initrd();
55331 return 1;
55332 }
55333 }
55334 - sys_unlink("/initrd.image");
55335 + sys_unlink((__force const char __user *)"/initrd.image");
55336 return 0;
55337 }
55338 diff -urNp linux-3.0.3/init/do_mounts_md.c linux-3.0.3/init/do_mounts_md.c
55339 --- linux-3.0.3/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55340 +++ linux-3.0.3/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55341 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55342 partitioned ? "_d" : "", minor,
55343 md_setup_args[ent].device_names);
55344
55345 - fd = sys_open(name, 0, 0);
55346 + fd = sys_open((__force char __user *)name, 0, 0);
55347 if (fd < 0) {
55348 printk(KERN_ERR "md: open failed - cannot start "
55349 "array %s\n", name);
55350 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55351 * array without it
55352 */
55353 sys_close(fd);
55354 - fd = sys_open(name, 0, 0);
55355 + fd = sys_open((__force char __user *)name, 0, 0);
55356 sys_ioctl(fd, BLKRRPART, 0);
55357 }
55358 sys_close(fd);
55359 diff -urNp linux-3.0.3/init/initramfs.c linux-3.0.3/init/initramfs.c
55360 --- linux-3.0.3/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55361 +++ linux-3.0.3/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55362 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55363 }
55364 }
55365
55366 -static long __init do_utime(char __user *filename, time_t mtime)
55367 +static long __init do_utime(__force char __user *filename, time_t mtime)
55368 {
55369 struct timespec t[2];
55370
55371 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55372 struct dir_entry *de, *tmp;
55373 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55374 list_del(&de->list);
55375 - do_utime(de->name, de->mtime);
55376 + do_utime((__force char __user *)de->name, de->mtime);
55377 kfree(de->name);
55378 kfree(de);
55379 }
55380 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55381 if (nlink >= 2) {
55382 char *old = find_link(major, minor, ino, mode, collected);
55383 if (old)
55384 - return (sys_link(old, collected) < 0) ? -1 : 1;
55385 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55386 }
55387 return 0;
55388 }
55389 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55390 {
55391 struct stat st;
55392
55393 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55394 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55395 if (S_ISDIR(st.st_mode))
55396 - sys_rmdir(path);
55397 + sys_rmdir((__force char __user *)path);
55398 else
55399 - sys_unlink(path);
55400 + sys_unlink((__force char __user *)path);
55401 }
55402 }
55403
55404 @@ -305,7 +305,7 @@ static int __init do_name(void)
55405 int openflags = O_WRONLY|O_CREAT;
55406 if (ml != 1)
55407 openflags |= O_TRUNC;
55408 - wfd = sys_open(collected, openflags, mode);
55409 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55410
55411 if (wfd >= 0) {
55412 sys_fchown(wfd, uid, gid);
55413 @@ -317,17 +317,17 @@ static int __init do_name(void)
55414 }
55415 }
55416 } else if (S_ISDIR(mode)) {
55417 - sys_mkdir(collected, mode);
55418 - sys_chown(collected, uid, gid);
55419 - sys_chmod(collected, mode);
55420 + sys_mkdir((__force char __user *)collected, mode);
55421 + sys_chown((__force char __user *)collected, uid, gid);
55422 + sys_chmod((__force char __user *)collected, mode);
55423 dir_add(collected, mtime);
55424 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55425 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55426 if (maybe_link() == 0) {
55427 - sys_mknod(collected, mode, rdev);
55428 - sys_chown(collected, uid, gid);
55429 - sys_chmod(collected, mode);
55430 - do_utime(collected, mtime);
55431 + sys_mknod((__force char __user *)collected, mode, rdev);
55432 + sys_chown((__force char __user *)collected, uid, gid);
55433 + sys_chmod((__force char __user *)collected, mode);
55434 + do_utime((__force char __user *)collected, mtime);
55435 }
55436 }
55437 return 0;
55438 @@ -336,15 +336,15 @@ static int __init do_name(void)
55439 static int __init do_copy(void)
55440 {
55441 if (count >= body_len) {
55442 - sys_write(wfd, victim, body_len);
55443 + sys_write(wfd, (__force char __user *)victim, body_len);
55444 sys_close(wfd);
55445 - do_utime(vcollected, mtime);
55446 + do_utime((__force char __user *)vcollected, mtime);
55447 kfree(vcollected);
55448 eat(body_len);
55449 state = SkipIt;
55450 return 0;
55451 } else {
55452 - sys_write(wfd, victim, count);
55453 + sys_write(wfd, (__force char __user *)victim, count);
55454 body_len -= count;
55455 eat(count);
55456 return 1;
55457 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55458 {
55459 collected[N_ALIGN(name_len) + body_len] = '\0';
55460 clean_path(collected, 0);
55461 - sys_symlink(collected + N_ALIGN(name_len), collected);
55462 - sys_lchown(collected, uid, gid);
55463 - do_utime(collected, mtime);
55464 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55465 + sys_lchown((__force char __user *)collected, uid, gid);
55466 + do_utime((__force char __user *)collected, mtime);
55467 state = SkipIt;
55468 next_state = Reset;
55469 return 0;
55470 diff -urNp linux-3.0.3/init/Kconfig linux-3.0.3/init/Kconfig
55471 --- linux-3.0.3/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55472 +++ linux-3.0.3/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55473 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55474
55475 config COMPAT_BRK
55476 bool "Disable heap randomization"
55477 - default y
55478 + default n
55479 help
55480 Randomizing heap placement makes heap exploits harder, but it
55481 also breaks ancient binaries (including anything libc5 based).
55482 diff -urNp linux-3.0.3/init/main.c linux-3.0.3/init/main.c
55483 --- linux-3.0.3/init/main.c 2011-07-21 22:17:23.000000000 -0400
55484 +++ linux-3.0.3/init/main.c 2011-08-23 21:48:14.000000000 -0400
55485 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55486 extern void tc_init(void);
55487 #endif
55488
55489 +extern void grsecurity_init(void);
55490 +
55491 /*
55492 * Debug helper: via this flag we know that we are in 'early bootup code'
55493 * where only the boot processor is running with IRQ disabled. This means
55494 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55495
55496 __setup("reset_devices", set_reset_devices);
55497
55498 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55499 +extern char pax_enter_kernel_user[];
55500 +extern char pax_exit_kernel_user[];
55501 +extern pgdval_t clone_pgd_mask;
55502 +#endif
55503 +
55504 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55505 +static int __init setup_pax_nouderef(char *str)
55506 +{
55507 +#ifdef CONFIG_X86_32
55508 + unsigned int cpu;
55509 + struct desc_struct *gdt;
55510 +
55511 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
55512 + gdt = get_cpu_gdt_table(cpu);
55513 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55514 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55515 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55516 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55517 + }
55518 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55519 +#else
55520 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55521 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55522 + clone_pgd_mask = ~(pgdval_t)0UL;
55523 +#endif
55524 +
55525 + return 0;
55526 +}
55527 +early_param("pax_nouderef", setup_pax_nouderef);
55528 +#endif
55529 +
55530 +#ifdef CONFIG_PAX_SOFTMODE
55531 +int pax_softmode;
55532 +
55533 +static int __init setup_pax_softmode(char *str)
55534 +{
55535 + get_option(&str, &pax_softmode);
55536 + return 1;
55537 +}
55538 +__setup("pax_softmode=", setup_pax_softmode);
55539 +#endif
55540 +
55541 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55542 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55543 static const char *panic_later, *panic_param;
55544 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55545 {
55546 int count = preempt_count();
55547 int ret;
55548 + const char *msg1 = "", *msg2 = "";
55549
55550 if (initcall_debug)
55551 ret = do_one_initcall_debug(fn);
55552 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55553 sprintf(msgbuf, "error code %d ", ret);
55554
55555 if (preempt_count() != count) {
55556 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55557 + msg1 = " preemption imbalance";
55558 preempt_count() = count;
55559 }
55560 if (irqs_disabled()) {
55561 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55562 + msg2 = " disabled interrupts";
55563 local_irq_enable();
55564 }
55565 - if (msgbuf[0]) {
55566 - printk("initcall %pF returned with %s\n", fn, msgbuf);
55567 + if (msgbuf[0] || *msg1 || *msg2) {
55568 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55569 }
55570
55571 return ret;
55572 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55573 do_basic_setup();
55574
55575 /* Open the /dev/console on the rootfs, this should never fail */
55576 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55577 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55578 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55579
55580 (void) sys_dup(0);
55581 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55582 if (!ramdisk_execute_command)
55583 ramdisk_execute_command = "/init";
55584
55585 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55586 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55587 ramdisk_execute_command = NULL;
55588 prepare_namespace();
55589 }
55590
55591 + grsecurity_init();
55592 +
55593 /*
55594 * Ok, we have completed the initial bootup, and
55595 * we're essentially up and running. Get rid of the
55596 diff -urNp linux-3.0.3/ipc/mqueue.c linux-3.0.3/ipc/mqueue.c
55597 --- linux-3.0.3/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55598 +++ linux-3.0.3/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55599 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55600 mq_bytes = (mq_msg_tblsz +
55601 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55602
55603 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55604 spin_lock(&mq_lock);
55605 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55606 u->mq_bytes + mq_bytes >
55607 diff -urNp linux-3.0.3/ipc/msg.c linux-3.0.3/ipc/msg.c
55608 --- linux-3.0.3/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55609 +++ linux-3.0.3/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55610 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55611 return security_msg_queue_associate(msq, msgflg);
55612 }
55613
55614 +static struct ipc_ops msg_ops = {
55615 + .getnew = newque,
55616 + .associate = msg_security,
55617 + .more_checks = NULL
55618 +};
55619 +
55620 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55621 {
55622 struct ipc_namespace *ns;
55623 - struct ipc_ops msg_ops;
55624 struct ipc_params msg_params;
55625
55626 ns = current->nsproxy->ipc_ns;
55627
55628 - msg_ops.getnew = newque;
55629 - msg_ops.associate = msg_security;
55630 - msg_ops.more_checks = NULL;
55631 -
55632 msg_params.key = key;
55633 msg_params.flg = msgflg;
55634
55635 diff -urNp linux-3.0.3/ipc/sem.c linux-3.0.3/ipc/sem.c
55636 --- linux-3.0.3/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55637 +++ linux-3.0.3/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55638 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55639 return 0;
55640 }
55641
55642 +static struct ipc_ops sem_ops = {
55643 + .getnew = newary,
55644 + .associate = sem_security,
55645 + .more_checks = sem_more_checks
55646 +};
55647 +
55648 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55649 {
55650 struct ipc_namespace *ns;
55651 - struct ipc_ops sem_ops;
55652 struct ipc_params sem_params;
55653
55654 ns = current->nsproxy->ipc_ns;
55655 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55656 if (nsems < 0 || nsems > ns->sc_semmsl)
55657 return -EINVAL;
55658
55659 - sem_ops.getnew = newary;
55660 - sem_ops.associate = sem_security;
55661 - sem_ops.more_checks = sem_more_checks;
55662 -
55663 sem_params.key = key;
55664 sem_params.flg = semflg;
55665 sem_params.u.nsems = nsems;
55666 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55667 int nsems;
55668 struct list_head tasks;
55669
55670 + pax_track_stack();
55671 +
55672 sma = sem_lock_check(ns, semid);
55673 if (IS_ERR(sma))
55674 return PTR_ERR(sma);
55675 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55676 struct ipc_namespace *ns;
55677 struct list_head tasks;
55678
55679 + pax_track_stack();
55680 +
55681 ns = current->nsproxy->ipc_ns;
55682
55683 if (nsops < 1 || semid < 0)
55684 diff -urNp linux-3.0.3/ipc/shm.c linux-3.0.3/ipc/shm.c
55685 --- linux-3.0.3/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55686 +++ linux-3.0.3/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55687 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55688 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55689 #endif
55690
55691 +#ifdef CONFIG_GRKERNSEC
55692 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55693 + const time_t shm_createtime, const uid_t cuid,
55694 + const int shmid);
55695 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55696 + const time_t shm_createtime);
55697 +#endif
55698 +
55699 void shm_init_ns(struct ipc_namespace *ns)
55700 {
55701 ns->shm_ctlmax = SHMMAX;
55702 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55703 shp->shm_lprid = 0;
55704 shp->shm_atim = shp->shm_dtim = 0;
55705 shp->shm_ctim = get_seconds();
55706 +#ifdef CONFIG_GRKERNSEC
55707 + {
55708 + struct timespec timeval;
55709 + do_posix_clock_monotonic_gettime(&timeval);
55710 +
55711 + shp->shm_createtime = timeval.tv_sec;
55712 + }
55713 +#endif
55714 shp->shm_segsz = size;
55715 shp->shm_nattch = 0;
55716 shp->shm_file = file;
55717 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
55718 return 0;
55719 }
55720
55721 +static struct ipc_ops shm_ops = {
55722 + .getnew = newseg,
55723 + .associate = shm_security,
55724 + .more_checks = shm_more_checks
55725 +};
55726 +
55727 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
55728 {
55729 struct ipc_namespace *ns;
55730 - struct ipc_ops shm_ops;
55731 struct ipc_params shm_params;
55732
55733 ns = current->nsproxy->ipc_ns;
55734
55735 - shm_ops.getnew = newseg;
55736 - shm_ops.associate = shm_security;
55737 - shm_ops.more_checks = shm_more_checks;
55738 -
55739 shm_params.key = key;
55740 shm_params.flg = shmflg;
55741 shm_params.u.size = size;
55742 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
55743 case SHM_LOCK:
55744 case SHM_UNLOCK:
55745 {
55746 - struct file *uninitialized_var(shm_file);
55747 -
55748 lru_add_drain_all(); /* drain pagevecs to lru lists */
55749
55750 shp = shm_lock_check(ns, shmid);
55751 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
55752 if (err)
55753 goto out_unlock;
55754
55755 +#ifdef CONFIG_GRKERNSEC
55756 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
55757 + shp->shm_perm.cuid, shmid) ||
55758 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
55759 + err = -EACCES;
55760 + goto out_unlock;
55761 + }
55762 +#endif
55763 +
55764 path = shp->shm_file->f_path;
55765 path_get(&path);
55766 shp->shm_nattch++;
55767 +#ifdef CONFIG_GRKERNSEC
55768 + shp->shm_lapid = current->pid;
55769 +#endif
55770 size = i_size_read(path.dentry->d_inode);
55771 shm_unlock(shp);
55772
55773 diff -urNp linux-3.0.3/kernel/acct.c linux-3.0.3/kernel/acct.c
55774 --- linux-3.0.3/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
55775 +++ linux-3.0.3/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
55776 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
55777 */
55778 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
55779 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
55780 - file->f_op->write(file, (char *)&ac,
55781 + file->f_op->write(file, (__force char __user *)&ac,
55782 sizeof(acct_t), &file->f_pos);
55783 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
55784 set_fs(fs);
55785 diff -urNp linux-3.0.3/kernel/audit.c linux-3.0.3/kernel/audit.c
55786 --- linux-3.0.3/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
55787 +++ linux-3.0.3/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
55788 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
55789 3) suppressed due to audit_rate_limit
55790 4) suppressed due to audit_backlog_limit
55791 */
55792 -static atomic_t audit_lost = ATOMIC_INIT(0);
55793 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
55794
55795 /* The netlink socket. */
55796 static struct sock *audit_sock;
55797 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
55798 unsigned long now;
55799 int print;
55800
55801 - atomic_inc(&audit_lost);
55802 + atomic_inc_unchecked(&audit_lost);
55803
55804 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
55805
55806 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
55807 printk(KERN_WARNING
55808 "audit: audit_lost=%d audit_rate_limit=%d "
55809 "audit_backlog_limit=%d\n",
55810 - atomic_read(&audit_lost),
55811 + atomic_read_unchecked(&audit_lost),
55812 audit_rate_limit,
55813 audit_backlog_limit);
55814 audit_panic(message);
55815 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
55816 status_set.pid = audit_pid;
55817 status_set.rate_limit = audit_rate_limit;
55818 status_set.backlog_limit = audit_backlog_limit;
55819 - status_set.lost = atomic_read(&audit_lost);
55820 + status_set.lost = atomic_read_unchecked(&audit_lost);
55821 status_set.backlog = skb_queue_len(&audit_skb_queue);
55822 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
55823 &status_set, sizeof(status_set));
55824 diff -urNp linux-3.0.3/kernel/auditsc.c linux-3.0.3/kernel/auditsc.c
55825 --- linux-3.0.3/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
55826 +++ linux-3.0.3/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
55827 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
55828 }
55829
55830 /* global counter which is incremented every time something logs in */
55831 -static atomic_t session_id = ATOMIC_INIT(0);
55832 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
55833
55834 /**
55835 * audit_set_loginuid - set a task's audit_context loginuid
55836 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
55837 */
55838 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
55839 {
55840 - unsigned int sessionid = atomic_inc_return(&session_id);
55841 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
55842 struct audit_context *context = task->audit_context;
55843
55844 if (context && context->in_syscall) {
55845 diff -urNp linux-3.0.3/kernel/capability.c linux-3.0.3/kernel/capability.c
55846 --- linux-3.0.3/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
55847 +++ linux-3.0.3/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
55848 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
55849 * before modification is attempted and the application
55850 * fails.
55851 */
55852 + if (tocopy > ARRAY_SIZE(kdata))
55853 + return -EFAULT;
55854 +
55855 if (copy_to_user(dataptr, kdata, tocopy
55856 * sizeof(struct __user_cap_data_struct))) {
55857 return -EFAULT;
55858 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
55859 BUG();
55860 }
55861
55862 - if (security_capable(ns, current_cred(), cap) == 0) {
55863 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
55864 current->flags |= PF_SUPERPRIV;
55865 return true;
55866 }
55867 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
55868 }
55869 EXPORT_SYMBOL(ns_capable);
55870
55871 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
55872 +{
55873 + if (unlikely(!cap_valid(cap))) {
55874 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
55875 + BUG();
55876 + }
55877 +
55878 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
55879 + current->flags |= PF_SUPERPRIV;
55880 + return true;
55881 + }
55882 + return false;
55883 +}
55884 +EXPORT_SYMBOL(ns_capable_nolog);
55885 +
55886 +bool capable_nolog(int cap)
55887 +{
55888 + return ns_capable_nolog(&init_user_ns, cap);
55889 +}
55890 +EXPORT_SYMBOL(capable_nolog);
55891 +
55892 /**
55893 * task_ns_capable - Determine whether current task has a superior
55894 * capability targeted at a specific task's user namespace.
55895 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
55896 }
55897 EXPORT_SYMBOL(task_ns_capable);
55898
55899 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
55900 +{
55901 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
55902 +}
55903 +EXPORT_SYMBOL(task_ns_capable_nolog);
55904 +
55905 /**
55906 * nsown_capable - Check superior capability to one's own user_ns
55907 * @cap: The capability in question
55908 diff -urNp linux-3.0.3/kernel/cgroup.c linux-3.0.3/kernel/cgroup.c
55909 --- linux-3.0.3/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
55910 +++ linux-3.0.3/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
55911 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
55912 struct hlist_head *hhead;
55913 struct cg_cgroup_link *link;
55914
55915 + pax_track_stack();
55916 +
55917 /* First see if we already have a cgroup group that matches
55918 * the desired set */
55919 read_lock(&css_set_lock);
55920 diff -urNp linux-3.0.3/kernel/compat.c linux-3.0.3/kernel/compat.c
55921 --- linux-3.0.3/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
55922 +++ linux-3.0.3/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
55923 @@ -13,6 +13,7 @@
55924
55925 #include <linux/linkage.h>
55926 #include <linux/compat.h>
55927 +#include <linux/module.h>
55928 #include <linux/errno.h>
55929 #include <linux/time.h>
55930 #include <linux/signal.h>
55931 diff -urNp linux-3.0.3/kernel/configs.c linux-3.0.3/kernel/configs.c
55932 --- linux-3.0.3/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
55933 +++ linux-3.0.3/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
55934 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
55935 struct proc_dir_entry *entry;
55936
55937 /* create the current config file */
55938 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55939 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
55940 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
55941 + &ikconfig_file_ops);
55942 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55943 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
55944 + &ikconfig_file_ops);
55945 +#endif
55946 +#else
55947 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
55948 &ikconfig_file_ops);
55949 +#endif
55950 +
55951 if (!entry)
55952 return -ENOMEM;
55953
55954 diff -urNp linux-3.0.3/kernel/cred.c linux-3.0.3/kernel/cred.c
55955 --- linux-3.0.3/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
55956 +++ linux-3.0.3/kernel/cred.c 2011-08-23 21:48:14.000000000 -0400
55957 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
55958 */
55959 void __put_cred(struct cred *cred)
55960 {
55961 + pax_track_stack();
55962 +
55963 kdebug("__put_cred(%p{%d,%d})", cred,
55964 atomic_read(&cred->usage),
55965 read_cred_subscribers(cred));
55966 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
55967 {
55968 struct cred *cred;
55969
55970 + pax_track_stack();
55971 +
55972 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
55973 atomic_read(&tsk->cred->usage),
55974 read_cred_subscribers(tsk->cred));
55975 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
55976 {
55977 const struct cred *cred;
55978
55979 + pax_track_stack();
55980 +
55981 rcu_read_lock();
55982
55983 do {
55984 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
55985 {
55986 struct cred *new;
55987
55988 + pax_track_stack();
55989 +
55990 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
55991 if (!new)
55992 return NULL;
55993 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
55994 const struct cred *old;
55995 struct cred *new;
55996
55997 + pax_track_stack();
55998 +
55999 validate_process_creds();
56000
56001 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56002 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56003 struct thread_group_cred *tgcred = NULL;
56004 struct cred *new;
56005
56006 + pax_track_stack();
56007 +
56008 #ifdef CONFIG_KEYS
56009 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56010 if (!tgcred)
56011 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56012 struct cred *new;
56013 int ret;
56014
56015 + pax_track_stack();
56016 +
56017 if (
56018 #ifdef CONFIG_KEYS
56019 !p->cred->thread_keyring &&
56020 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56021 struct task_struct *task = current;
56022 const struct cred *old = task->real_cred;
56023
56024 + pax_track_stack();
56025 +
56026 kdebug("commit_creds(%p{%d,%d})", new,
56027 atomic_read(&new->usage),
56028 read_cred_subscribers(new));
56029 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56030
56031 get_cred(new); /* we will require a ref for the subj creds too */
56032
56033 + gr_set_role_label(task, new->uid, new->gid);
56034 +
56035 /* dumpability changes */
56036 if (old->euid != new->euid ||
56037 old->egid != new->egid ||
56038 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56039 */
56040 void abort_creds(struct cred *new)
56041 {
56042 + pax_track_stack();
56043 +
56044 kdebug("abort_creds(%p{%d,%d})", new,
56045 atomic_read(&new->usage),
56046 read_cred_subscribers(new));
56047 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56048 {
56049 const struct cred *old = current->cred;
56050
56051 + pax_track_stack();
56052 +
56053 kdebug("override_creds(%p{%d,%d})", new,
56054 atomic_read(&new->usage),
56055 read_cred_subscribers(new));
56056 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56057 {
56058 const struct cred *override = current->cred;
56059
56060 + pax_track_stack();
56061 +
56062 kdebug("revert_creds(%p{%d,%d})", old,
56063 atomic_read(&old->usage),
56064 read_cred_subscribers(old));
56065 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56066 const struct cred *old;
56067 struct cred *new;
56068
56069 + pax_track_stack();
56070 +
56071 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56072 if (!new)
56073 return NULL;
56074 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56075 */
56076 int set_security_override(struct cred *new, u32 secid)
56077 {
56078 + pax_track_stack();
56079 +
56080 return security_kernel_act_as(new, secid);
56081 }
56082 EXPORT_SYMBOL(set_security_override);
56083 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56084 u32 secid;
56085 int ret;
56086
56087 + pax_track_stack();
56088 +
56089 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56090 if (ret < 0)
56091 return ret;
56092 diff -urNp linux-3.0.3/kernel/debug/debug_core.c linux-3.0.3/kernel/debug/debug_core.c
56093 --- linux-3.0.3/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56094 +++ linux-3.0.3/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56095 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56096 */
56097 static atomic_t masters_in_kgdb;
56098 static atomic_t slaves_in_kgdb;
56099 -static atomic_t kgdb_break_tasklet_var;
56100 +static atomic_unchecked_t kgdb_break_tasklet_var;
56101 atomic_t kgdb_setting_breakpoint;
56102
56103 struct task_struct *kgdb_usethread;
56104 @@ -129,7 +129,7 @@ int kgdb_single_step;
56105 static pid_t kgdb_sstep_pid;
56106
56107 /* to keep track of the CPU which is doing the single stepping*/
56108 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56109 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56110
56111 /*
56112 * If you are debugging a problem where roundup (the collection of
56113 @@ -542,7 +542,7 @@ return_normal:
56114 * kernel will only try for the value of sstep_tries before
56115 * giving up and continuing on.
56116 */
56117 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56118 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56119 (kgdb_info[cpu].task &&
56120 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56121 atomic_set(&kgdb_active, -1);
56122 @@ -636,8 +636,8 @@ cpu_master_loop:
56123 }
56124
56125 kgdb_restore:
56126 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56127 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56128 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56129 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56130 if (kgdb_info[sstep_cpu].task)
56131 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56132 else
56133 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56134 static void kgdb_tasklet_bpt(unsigned long ing)
56135 {
56136 kgdb_breakpoint();
56137 - atomic_set(&kgdb_break_tasklet_var, 0);
56138 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56139 }
56140
56141 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56142
56143 void kgdb_schedule_breakpoint(void)
56144 {
56145 - if (atomic_read(&kgdb_break_tasklet_var) ||
56146 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56147 atomic_read(&kgdb_active) != -1 ||
56148 atomic_read(&kgdb_setting_breakpoint))
56149 return;
56150 - atomic_inc(&kgdb_break_tasklet_var);
56151 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56152 tasklet_schedule(&kgdb_tasklet_breakpoint);
56153 }
56154 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56155 diff -urNp linux-3.0.3/kernel/debug/kdb/kdb_main.c linux-3.0.3/kernel/debug/kdb/kdb_main.c
56156 --- linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56157 +++ linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56158 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56159 list_for_each_entry(mod, kdb_modules, list) {
56160
56161 kdb_printf("%-20s%8u 0x%p ", mod->name,
56162 - mod->core_size, (void *)mod);
56163 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56164 #ifdef CONFIG_MODULE_UNLOAD
56165 kdb_printf("%4d ", module_refcount(mod));
56166 #endif
56167 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56168 kdb_printf(" (Loading)");
56169 else
56170 kdb_printf(" (Live)");
56171 - kdb_printf(" 0x%p", mod->module_core);
56172 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56173
56174 #ifdef CONFIG_MODULE_UNLOAD
56175 {
56176 diff -urNp linux-3.0.3/kernel/events/core.c linux-3.0.3/kernel/events/core.c
56177 --- linux-3.0.3/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56178 +++ linux-3.0.3/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56179 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56180 return 0;
56181 }
56182
56183 -static atomic64_t perf_event_id;
56184 +static atomic64_unchecked_t perf_event_id;
56185
56186 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56187 enum event_type_t event_type);
56188 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56189
56190 static inline u64 perf_event_count(struct perf_event *event)
56191 {
56192 - return local64_read(&event->count) + atomic64_read(&event->child_count);
56193 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56194 }
56195
56196 static u64 perf_event_read(struct perf_event *event)
56197 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56198 mutex_lock(&event->child_mutex);
56199 total += perf_event_read(event);
56200 *enabled += event->total_time_enabled +
56201 - atomic64_read(&event->child_total_time_enabled);
56202 + atomic64_read_unchecked(&event->child_total_time_enabled);
56203 *running += event->total_time_running +
56204 - atomic64_read(&event->child_total_time_running);
56205 + atomic64_read_unchecked(&event->child_total_time_running);
56206
56207 list_for_each_entry(child, &event->child_list, child_list) {
56208 total += perf_event_read(child);
56209 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56210 userpg->offset -= local64_read(&event->hw.prev_count);
56211
56212 userpg->time_enabled = event->total_time_enabled +
56213 - atomic64_read(&event->child_total_time_enabled);
56214 + atomic64_read_unchecked(&event->child_total_time_enabled);
56215
56216 userpg->time_running = event->total_time_running +
56217 - atomic64_read(&event->child_total_time_running);
56218 + atomic64_read_unchecked(&event->child_total_time_running);
56219
56220 barrier();
56221 ++userpg->lock;
56222 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56223 values[n++] = perf_event_count(event);
56224 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56225 values[n++] = enabled +
56226 - atomic64_read(&event->child_total_time_enabled);
56227 + atomic64_read_unchecked(&event->child_total_time_enabled);
56228 }
56229 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56230 values[n++] = running +
56231 - atomic64_read(&event->child_total_time_running);
56232 + atomic64_read_unchecked(&event->child_total_time_running);
56233 }
56234 if (read_format & PERF_FORMAT_ID)
56235 values[n++] = primary_event_id(event);
56236 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56237 event->parent = parent_event;
56238
56239 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56240 - event->id = atomic64_inc_return(&perf_event_id);
56241 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
56242
56243 event->state = PERF_EVENT_STATE_INACTIVE;
56244
56245 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56246 /*
56247 * Add back the child's count to the parent's count:
56248 */
56249 - atomic64_add(child_val, &parent_event->child_count);
56250 - atomic64_add(child_event->total_time_enabled,
56251 + atomic64_add_unchecked(child_val, &parent_event->child_count);
56252 + atomic64_add_unchecked(child_event->total_time_enabled,
56253 &parent_event->child_total_time_enabled);
56254 - atomic64_add(child_event->total_time_running,
56255 + atomic64_add_unchecked(child_event->total_time_running,
56256 &parent_event->child_total_time_running);
56257
56258 /*
56259 diff -urNp linux-3.0.3/kernel/exit.c linux-3.0.3/kernel/exit.c
56260 --- linux-3.0.3/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56261 +++ linux-3.0.3/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56262 @@ -57,6 +57,10 @@
56263 #include <asm/pgtable.h>
56264 #include <asm/mmu_context.h>
56265
56266 +#ifdef CONFIG_GRKERNSEC
56267 +extern rwlock_t grsec_exec_file_lock;
56268 +#endif
56269 +
56270 static void exit_mm(struct task_struct * tsk);
56271
56272 static void __unhash_process(struct task_struct *p, bool group_dead)
56273 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56274 struct task_struct *leader;
56275 int zap_leader;
56276 repeat:
56277 +#ifdef CONFIG_NET
56278 + gr_del_task_from_ip_table(p);
56279 +#endif
56280 +
56281 tracehook_prepare_release_task(p);
56282 /* don't need to get the RCU readlock here - the process is dead and
56283 * can't be modifying its own credentials. But shut RCU-lockdep up */
56284 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56285 {
56286 write_lock_irq(&tasklist_lock);
56287
56288 +#ifdef CONFIG_GRKERNSEC
56289 + write_lock(&grsec_exec_file_lock);
56290 + if (current->exec_file) {
56291 + fput(current->exec_file);
56292 + current->exec_file = NULL;
56293 + }
56294 + write_unlock(&grsec_exec_file_lock);
56295 +#endif
56296 +
56297 ptrace_unlink(current);
56298 /* Reparent to init */
56299 current->real_parent = current->parent = kthreadd_task;
56300 list_move_tail(&current->sibling, &current->real_parent->children);
56301
56302 + gr_set_kernel_label(current);
56303 +
56304 /* Set the exit signal to SIGCHLD so we signal init on exit */
56305 current->exit_signal = SIGCHLD;
56306
56307 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56308 * know it'll be handled, so that they don't get converted to
56309 * SIGKILL or just silently dropped.
56310 */
56311 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56312 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56313 recalc_sigpending();
56314 spin_unlock_irq(&current->sighand->siglock);
56315 return 0;
56316 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56317 vsnprintf(current->comm, sizeof(current->comm), name, args);
56318 va_end(args);
56319
56320 +#ifdef CONFIG_GRKERNSEC
56321 + write_lock(&grsec_exec_file_lock);
56322 + if (current->exec_file) {
56323 + fput(current->exec_file);
56324 + current->exec_file = NULL;
56325 + }
56326 + write_unlock(&grsec_exec_file_lock);
56327 +#endif
56328 +
56329 + gr_set_kernel_label(current);
56330 +
56331 /*
56332 * If we were started as result of loading a module, close all of the
56333 * user space pages. We don't need them, and if we didn't close them
56334 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56335 struct task_struct *tsk = current;
56336 int group_dead;
56337
56338 - profile_task_exit(tsk);
56339 -
56340 - WARN_ON(atomic_read(&tsk->fs_excl));
56341 - WARN_ON(blk_needs_flush_plug(tsk));
56342 -
56343 if (unlikely(in_interrupt()))
56344 panic("Aiee, killing interrupt handler!");
56345 - if (unlikely(!tsk->pid))
56346 - panic("Attempted to kill the idle task!");
56347
56348 /*
56349 * If do_exit is called because this processes oopsed, it's possible
56350 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56351 */
56352 set_fs(USER_DS);
56353
56354 + profile_task_exit(tsk);
56355 +
56356 + WARN_ON(atomic_read(&tsk->fs_excl));
56357 + WARN_ON(blk_needs_flush_plug(tsk));
56358 +
56359 + if (unlikely(!tsk->pid))
56360 + panic("Attempted to kill the idle task!");
56361 +
56362 tracehook_report_exit(&code);
56363
56364 validate_creds_for_do_exit(tsk);
56365 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56366 tsk->exit_code = code;
56367 taskstats_exit(tsk, group_dead);
56368
56369 + gr_acl_handle_psacct(tsk, code);
56370 + gr_acl_handle_exit();
56371 +
56372 exit_mm(tsk);
56373
56374 if (group_dead)
56375 diff -urNp linux-3.0.3/kernel/fork.c linux-3.0.3/kernel/fork.c
56376 --- linux-3.0.3/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56377 +++ linux-3.0.3/kernel/fork.c 2011-08-23 21:48:14.000000000 -0400
56378 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56379 *stackend = STACK_END_MAGIC; /* for overflow detection */
56380
56381 #ifdef CONFIG_CC_STACKPROTECTOR
56382 - tsk->stack_canary = get_random_int();
56383 + tsk->stack_canary = pax_get_random_long();
56384 #endif
56385
56386 /* One for us, one for whoever does the "release_task()" (usually parent) */
56387 @@ -308,13 +308,77 @@ out:
56388 }
56389
56390 #ifdef CONFIG_MMU
56391 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56392 +{
56393 + struct vm_area_struct *tmp;
56394 + unsigned long charge;
56395 + struct mempolicy *pol;
56396 + struct file *file;
56397 +
56398 + charge = 0;
56399 + if (mpnt->vm_flags & VM_ACCOUNT) {
56400 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56401 + if (security_vm_enough_memory(len))
56402 + goto fail_nomem;
56403 + charge = len;
56404 + }
56405 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56406 + if (!tmp)
56407 + goto fail_nomem;
56408 + *tmp = *mpnt;
56409 + tmp->vm_mm = mm;
56410 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56411 + pol = mpol_dup(vma_policy(mpnt));
56412 + if (IS_ERR(pol))
56413 + goto fail_nomem_policy;
56414 + vma_set_policy(tmp, pol);
56415 + if (anon_vma_fork(tmp, mpnt))
56416 + goto fail_nomem_anon_vma_fork;
56417 + tmp->vm_flags &= ~VM_LOCKED;
56418 + tmp->vm_next = tmp->vm_prev = NULL;
56419 + tmp->vm_mirror = NULL;
56420 + file = tmp->vm_file;
56421 + if (file) {
56422 + struct inode *inode = file->f_path.dentry->d_inode;
56423 + struct address_space *mapping = file->f_mapping;
56424 +
56425 + get_file(file);
56426 + if (tmp->vm_flags & VM_DENYWRITE)
56427 + atomic_dec(&inode->i_writecount);
56428 + mutex_lock(&mapping->i_mmap_mutex);
56429 + if (tmp->vm_flags & VM_SHARED)
56430 + mapping->i_mmap_writable++;
56431 + flush_dcache_mmap_lock(mapping);
56432 + /* insert tmp into the share list, just after mpnt */
56433 + vma_prio_tree_add(tmp, mpnt);
56434 + flush_dcache_mmap_unlock(mapping);
56435 + mutex_unlock(&mapping->i_mmap_mutex);
56436 + }
56437 +
56438 + /*
56439 + * Clear hugetlb-related page reserves for children. This only
56440 + * affects MAP_PRIVATE mappings. Faults generated by the child
56441 + * are not guaranteed to succeed, even if read-only
56442 + */
56443 + if (is_vm_hugetlb_page(tmp))
56444 + reset_vma_resv_huge_pages(tmp);
56445 +
56446 + return tmp;
56447 +
56448 +fail_nomem_anon_vma_fork:
56449 + mpol_put(pol);
56450 +fail_nomem_policy:
56451 + kmem_cache_free(vm_area_cachep, tmp);
56452 +fail_nomem:
56453 + vm_unacct_memory(charge);
56454 + return NULL;
56455 +}
56456 +
56457 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56458 {
56459 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56460 struct rb_node **rb_link, *rb_parent;
56461 int retval;
56462 - unsigned long charge;
56463 - struct mempolicy *pol;
56464
56465 down_write(&oldmm->mmap_sem);
56466 flush_cache_dup_mm(oldmm);
56467 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56468 mm->locked_vm = 0;
56469 mm->mmap = NULL;
56470 mm->mmap_cache = NULL;
56471 - mm->free_area_cache = oldmm->mmap_base;
56472 - mm->cached_hole_size = ~0UL;
56473 + mm->free_area_cache = oldmm->free_area_cache;
56474 + mm->cached_hole_size = oldmm->cached_hole_size;
56475 mm->map_count = 0;
56476 cpumask_clear(mm_cpumask(mm));
56477 mm->mm_rb = RB_ROOT;
56478 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56479
56480 prev = NULL;
56481 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56482 - struct file *file;
56483 -
56484 if (mpnt->vm_flags & VM_DONTCOPY) {
56485 long pages = vma_pages(mpnt);
56486 mm->total_vm -= pages;
56487 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56488 -pages);
56489 continue;
56490 }
56491 - charge = 0;
56492 - if (mpnt->vm_flags & VM_ACCOUNT) {
56493 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56494 - if (security_vm_enough_memory(len))
56495 - goto fail_nomem;
56496 - charge = len;
56497 - }
56498 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56499 - if (!tmp)
56500 - goto fail_nomem;
56501 - *tmp = *mpnt;
56502 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56503 - pol = mpol_dup(vma_policy(mpnt));
56504 - retval = PTR_ERR(pol);
56505 - if (IS_ERR(pol))
56506 - goto fail_nomem_policy;
56507 - vma_set_policy(tmp, pol);
56508 - tmp->vm_mm = mm;
56509 - if (anon_vma_fork(tmp, mpnt))
56510 - goto fail_nomem_anon_vma_fork;
56511 - tmp->vm_flags &= ~VM_LOCKED;
56512 - tmp->vm_next = tmp->vm_prev = NULL;
56513 - file = tmp->vm_file;
56514 - if (file) {
56515 - struct inode *inode = file->f_path.dentry->d_inode;
56516 - struct address_space *mapping = file->f_mapping;
56517 -
56518 - get_file(file);
56519 - if (tmp->vm_flags & VM_DENYWRITE)
56520 - atomic_dec(&inode->i_writecount);
56521 - mutex_lock(&mapping->i_mmap_mutex);
56522 - if (tmp->vm_flags & VM_SHARED)
56523 - mapping->i_mmap_writable++;
56524 - flush_dcache_mmap_lock(mapping);
56525 - /* insert tmp into the share list, just after mpnt */
56526 - vma_prio_tree_add(tmp, mpnt);
56527 - flush_dcache_mmap_unlock(mapping);
56528 - mutex_unlock(&mapping->i_mmap_mutex);
56529 + tmp = dup_vma(mm, mpnt);
56530 + if (!tmp) {
56531 + retval = -ENOMEM;
56532 + goto out;
56533 }
56534
56535 /*
56536 - * Clear hugetlb-related page reserves for children. This only
56537 - * affects MAP_PRIVATE mappings. Faults generated by the child
56538 - * are not guaranteed to succeed, even if read-only
56539 - */
56540 - if (is_vm_hugetlb_page(tmp))
56541 - reset_vma_resv_huge_pages(tmp);
56542 -
56543 - /*
56544 * Link in the new vma and copy the page table entries.
56545 */
56546 *pprev = tmp;
56547 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56548 if (retval)
56549 goto out;
56550 }
56551 +
56552 +#ifdef CONFIG_PAX_SEGMEXEC
56553 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56554 + struct vm_area_struct *mpnt_m;
56555 +
56556 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56557 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56558 +
56559 + if (!mpnt->vm_mirror)
56560 + continue;
56561 +
56562 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56563 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56564 + mpnt->vm_mirror = mpnt_m;
56565 + } else {
56566 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56567 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56568 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56569 + mpnt->vm_mirror->vm_mirror = mpnt;
56570 + }
56571 + }
56572 + BUG_ON(mpnt_m);
56573 + }
56574 +#endif
56575 +
56576 /* a new mm has just been created */
56577 arch_dup_mmap(oldmm, mm);
56578 retval = 0;
56579 @@ -429,14 +474,6 @@ out:
56580 flush_tlb_mm(oldmm);
56581 up_write(&oldmm->mmap_sem);
56582 return retval;
56583 -fail_nomem_anon_vma_fork:
56584 - mpol_put(pol);
56585 -fail_nomem_policy:
56586 - kmem_cache_free(vm_area_cachep, tmp);
56587 -fail_nomem:
56588 - retval = -ENOMEM;
56589 - vm_unacct_memory(charge);
56590 - goto out;
56591 }
56592
56593 static inline int mm_alloc_pgd(struct mm_struct * mm)
56594 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56595 spin_unlock(&fs->lock);
56596 return -EAGAIN;
56597 }
56598 - fs->users++;
56599 + atomic_inc(&fs->users);
56600 spin_unlock(&fs->lock);
56601 return 0;
56602 }
56603 tsk->fs = copy_fs_struct(fs);
56604 if (!tsk->fs)
56605 return -ENOMEM;
56606 + gr_set_chroot_entries(tsk, &tsk->fs->root);
56607 return 0;
56608 }
56609
56610 @@ -1104,10 +1142,13 @@ static struct task_struct *copy_process(
56611 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56612 #endif
56613 retval = -EAGAIN;
56614 +
56615 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56616 +
56617 if (atomic_read(&p->real_cred->user->processes) >=
56618 task_rlimit(p, RLIMIT_NPROC)) {
56619 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56620 - p->real_cred->user != INIT_USER)
56621 + if (p->real_cred->user != INIT_USER &&
56622 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56623 goto bad_fork_free;
56624 }
56625
56626 @@ -1250,6 +1291,8 @@ static struct task_struct *copy_process(
56627 if (clone_flags & CLONE_THREAD)
56628 p->tgid = current->tgid;
56629
56630 + gr_copy_label(p);
56631 +
56632 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56633 /*
56634 * Clear TID on mm_release()?
56635 @@ -1414,6 +1457,8 @@ bad_fork_cleanup_count:
56636 bad_fork_free:
56637 free_task(p);
56638 fork_out:
56639 + gr_log_forkfail(retval);
56640 +
56641 return ERR_PTR(retval);
56642 }
56643
56644 @@ -1502,6 +1547,8 @@ long do_fork(unsigned long clone_flags,
56645 if (clone_flags & CLONE_PARENT_SETTID)
56646 put_user(nr, parent_tidptr);
56647
56648 + gr_handle_brute_check();
56649 +
56650 if (clone_flags & CLONE_VFORK) {
56651 p->vfork_done = &vfork;
56652 init_completion(&vfork);
56653 @@ -1610,7 +1657,7 @@ static int unshare_fs(unsigned long unsh
56654 return 0;
56655
56656 /* don't need lock here; in the worst case we'll do useless copy */
56657 - if (fs->users == 1)
56658 + if (atomic_read(&fs->users) == 1)
56659 return 0;
56660
56661 *new_fsp = copy_fs_struct(fs);
56662 @@ -1697,7 +1744,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56663 fs = current->fs;
56664 spin_lock(&fs->lock);
56665 current->fs = new_fs;
56666 - if (--fs->users)
56667 + gr_set_chroot_entries(current, &current->fs->root);
56668 + if (atomic_dec_return(&fs->users))
56669 new_fs = NULL;
56670 else
56671 new_fs = fs;
56672 diff -urNp linux-3.0.3/kernel/futex.c linux-3.0.3/kernel/futex.c
56673 --- linux-3.0.3/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56674 +++ linux-3.0.3/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56675 @@ -54,6 +54,7 @@
56676 #include <linux/mount.h>
56677 #include <linux/pagemap.h>
56678 #include <linux/syscalls.h>
56679 +#include <linux/ptrace.h>
56680 #include <linux/signal.h>
56681 #include <linux/module.h>
56682 #include <linux/magic.h>
56683 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56684 struct page *page, *page_head;
56685 int err, ro = 0;
56686
56687 +#ifdef CONFIG_PAX_SEGMEXEC
56688 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56689 + return -EFAULT;
56690 +#endif
56691 +
56692 /*
56693 * The futex address must be "naturally" aligned.
56694 */
56695 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
56696 struct futex_q q = futex_q_init;
56697 int ret;
56698
56699 + pax_track_stack();
56700 +
56701 if (!bitset)
56702 return -EINVAL;
56703 q.bitset = bitset;
56704 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
56705 struct futex_q q = futex_q_init;
56706 int res, ret;
56707
56708 + pax_track_stack();
56709 +
56710 if (!bitset)
56711 return -EINVAL;
56712
56713 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56714 {
56715 struct robust_list_head __user *head;
56716 unsigned long ret;
56717 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56718 const struct cred *cred = current_cred(), *pcred;
56719 +#endif
56720
56721 if (!futex_cmpxchg_enabled)
56722 return -ENOSYS;
56723 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56724 if (!p)
56725 goto err_unlock;
56726 ret = -EPERM;
56727 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56728 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
56729 + goto err_unlock;
56730 +#else
56731 pcred = __task_cred(p);
56732 /* If victim is in different user_ns, then uids are not
56733 comparable, so we must have CAP_SYS_PTRACE */
56734 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56735 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
56736 goto err_unlock;
56737 ok:
56738 +#endif
56739 head = p->robust_list;
56740 rcu_read_unlock();
56741 }
56742 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
56743 {
56744 u32 curval;
56745 int i;
56746 + mm_segment_t oldfs;
56747
56748 /*
56749 * This will fail and we want it. Some arch implementations do
56750 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
56751 * implementation, the non-functional ones will return
56752 * -ENOSYS.
56753 */
56754 + oldfs = get_fs();
56755 + set_fs(USER_DS);
56756 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
56757 futex_cmpxchg_enabled = 1;
56758 + set_fs(oldfs);
56759
56760 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
56761 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
56762 diff -urNp linux-3.0.3/kernel/futex_compat.c linux-3.0.3/kernel/futex_compat.c
56763 --- linux-3.0.3/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
56764 +++ linux-3.0.3/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
56765 @@ -10,6 +10,7 @@
56766 #include <linux/compat.h>
56767 #include <linux/nsproxy.h>
56768 #include <linux/futex.h>
56769 +#include <linux/ptrace.h>
56770
56771 #include <asm/uaccess.h>
56772
56773 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
56774 {
56775 struct compat_robust_list_head __user *head;
56776 unsigned long ret;
56777 - const struct cred *cred = current_cred(), *pcred;
56778 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56779 + const struct cred *cred = current_cred();
56780 + const struct cred *pcred;
56781 +#endif
56782
56783 if (!futex_cmpxchg_enabled)
56784 return -ENOSYS;
56785 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
56786 if (!p)
56787 goto err_unlock;
56788 ret = -EPERM;
56789 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56790 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
56791 + goto err_unlock;
56792 +#else
56793 pcred = __task_cred(p);
56794 /* If victim is in different user_ns, then uids are not
56795 comparable, so we must have CAP_SYS_PTRACE */
56796 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
56797 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
56798 goto err_unlock;
56799 ok:
56800 +#endif
56801 head = p->compat_robust_list;
56802 rcu_read_unlock();
56803 }
56804 diff -urNp linux-3.0.3/kernel/gcov/base.c linux-3.0.3/kernel/gcov/base.c
56805 --- linux-3.0.3/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
56806 +++ linux-3.0.3/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
56807 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
56808 }
56809
56810 #ifdef CONFIG_MODULES
56811 -static inline int within(void *addr, void *start, unsigned long size)
56812 -{
56813 - return ((addr >= start) && (addr < start + size));
56814 -}
56815 -
56816 /* Update list and generate events when modules are unloaded. */
56817 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
56818 void *data)
56819 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
56820 prev = NULL;
56821 /* Remove entries located in module from linked list. */
56822 for (info = gcov_info_head; info; info = info->next) {
56823 - if (within(info, mod->module_core, mod->core_size)) {
56824 + if (within_module_core_rw((unsigned long)info, mod)) {
56825 if (prev)
56826 prev->next = info->next;
56827 else
56828 diff -urNp linux-3.0.3/kernel/hrtimer.c linux-3.0.3/kernel/hrtimer.c
56829 --- linux-3.0.3/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
56830 +++ linux-3.0.3/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
56831 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
56832 local_irq_restore(flags);
56833 }
56834
56835 -static void run_hrtimer_softirq(struct softirq_action *h)
56836 +static void run_hrtimer_softirq(void)
56837 {
56838 hrtimer_peek_ahead_timers();
56839 }
56840 diff -urNp linux-3.0.3/kernel/jump_label.c linux-3.0.3/kernel/jump_label.c
56841 --- linux-3.0.3/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
56842 +++ linux-3.0.3/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
56843 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
56844
56845 size = (((unsigned long)stop - (unsigned long)start)
56846 / sizeof(struct jump_entry));
56847 + pax_open_kernel();
56848 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
56849 + pax_close_kernel();
56850 }
56851
56852 static void jump_label_update(struct jump_label_key *key, int enable);
56853 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
56854 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
56855 struct jump_entry *iter;
56856
56857 + pax_open_kernel();
56858 for (iter = iter_start; iter < iter_stop; iter++) {
56859 if (within_module_init(iter->code, mod))
56860 iter->code = 0;
56861 }
56862 + pax_close_kernel();
56863 }
56864
56865 static int
56866 diff -urNp linux-3.0.3/kernel/kallsyms.c linux-3.0.3/kernel/kallsyms.c
56867 --- linux-3.0.3/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
56868 +++ linux-3.0.3/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
56869 @@ -11,6 +11,9 @@
56870 * Changed the compression method from stem compression to "table lookup"
56871 * compression (see scripts/kallsyms.c for a more complete description)
56872 */
56873 +#ifdef CONFIG_GRKERNSEC_HIDESYM
56874 +#define __INCLUDED_BY_HIDESYM 1
56875 +#endif
56876 #include <linux/kallsyms.h>
56877 #include <linux/module.h>
56878 #include <linux/init.h>
56879 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
56880
56881 static inline int is_kernel_inittext(unsigned long addr)
56882 {
56883 + if (system_state != SYSTEM_BOOTING)
56884 + return 0;
56885 +
56886 if (addr >= (unsigned long)_sinittext
56887 && addr <= (unsigned long)_einittext)
56888 return 1;
56889 return 0;
56890 }
56891
56892 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
56893 +#ifdef CONFIG_MODULES
56894 +static inline int is_module_text(unsigned long addr)
56895 +{
56896 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
56897 + return 1;
56898 +
56899 + addr = ktla_ktva(addr);
56900 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
56901 +}
56902 +#else
56903 +static inline int is_module_text(unsigned long addr)
56904 +{
56905 + return 0;
56906 +}
56907 +#endif
56908 +#endif
56909 +
56910 static inline int is_kernel_text(unsigned long addr)
56911 {
56912 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
56913 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
56914
56915 static inline int is_kernel(unsigned long addr)
56916 {
56917 +
56918 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
56919 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
56920 + return 1;
56921 +
56922 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
56923 +#else
56924 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
56925 +#endif
56926 +
56927 return 1;
56928 return in_gate_area_no_mm(addr);
56929 }
56930
56931 static int is_ksym_addr(unsigned long addr)
56932 {
56933 +
56934 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
56935 + if (is_module_text(addr))
56936 + return 0;
56937 +#endif
56938 +
56939 if (all_var)
56940 return is_kernel(addr);
56941
56942 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
56943
56944 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
56945 {
56946 - iter->name[0] = '\0';
56947 iter->nameoff = get_symbol_offset(new_pos);
56948 iter->pos = new_pos;
56949 }
56950 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
56951 {
56952 struct kallsym_iter *iter = m->private;
56953
56954 +#ifdef CONFIG_GRKERNSEC_HIDESYM
56955 + if (current_uid())
56956 + return 0;
56957 +#endif
56958 +
56959 /* Some debugging symbols have no name. Ignore them. */
56960 if (!iter->name[0])
56961 return 0;
56962 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
56963 struct kallsym_iter *iter;
56964 int ret;
56965
56966 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
56967 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
56968 if (!iter)
56969 return -ENOMEM;
56970 reset_iter(iter, 0);
56971 diff -urNp linux-3.0.3/kernel/kmod.c linux-3.0.3/kernel/kmod.c
56972 --- linux-3.0.3/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
56973 +++ linux-3.0.3/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
56974 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
56975 * If module auto-loading support is disabled then this function
56976 * becomes a no-operation.
56977 */
56978 -int __request_module(bool wait, const char *fmt, ...)
56979 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
56980 {
56981 - va_list args;
56982 char module_name[MODULE_NAME_LEN];
56983 unsigned int max_modprobes;
56984 int ret;
56985 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
56986 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
56987 static char *envp[] = { "HOME=/",
56988 "TERM=linux",
56989 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
56990 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
56991 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
56992 static int kmod_loop_msg;
56993
56994 - va_start(args, fmt);
56995 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
56996 - va_end(args);
56997 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
56998 if (ret >= MODULE_NAME_LEN)
56999 return -ENAMETOOLONG;
57000
57001 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57002 if (ret)
57003 return ret;
57004
57005 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57006 + if (!current_uid()) {
57007 + /* hack to workaround consolekit/udisks stupidity */
57008 + read_lock(&tasklist_lock);
57009 + if (!strcmp(current->comm, "mount") &&
57010 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57011 + read_unlock(&tasklist_lock);
57012 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57013 + return -EPERM;
57014 + }
57015 + read_unlock(&tasklist_lock);
57016 + }
57017 +#endif
57018 +
57019 /* If modprobe needs a service that is in a module, we get a recursive
57020 * loop. Limit the number of running kmod threads to max_threads/2 or
57021 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57022 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57023 atomic_dec(&kmod_concurrent);
57024 return ret;
57025 }
57026 +
57027 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57028 +{
57029 + va_list args;
57030 + int ret;
57031 +
57032 + va_start(args, fmt);
57033 + ret = ____request_module(wait, module_param, fmt, args);
57034 + va_end(args);
57035 +
57036 + return ret;
57037 +}
57038 +
57039 +int __request_module(bool wait, const char *fmt, ...)
57040 +{
57041 + va_list args;
57042 + int ret;
57043 +
57044 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57045 + if (current_uid()) {
57046 + char module_param[MODULE_NAME_LEN];
57047 +
57048 + memset(module_param, 0, sizeof(module_param));
57049 +
57050 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57051 +
57052 + va_start(args, fmt);
57053 + ret = ____request_module(wait, module_param, fmt, args);
57054 + va_end(args);
57055 +
57056 + return ret;
57057 + }
57058 +#endif
57059 +
57060 + va_start(args, fmt);
57061 + ret = ____request_module(wait, NULL, fmt, args);
57062 + va_end(args);
57063 +
57064 + return ret;
57065 +}
57066 +
57067 EXPORT_SYMBOL(__request_module);
57068 #endif /* CONFIG_MODULES */
57069
57070 diff -urNp linux-3.0.3/kernel/kprobes.c linux-3.0.3/kernel/kprobes.c
57071 --- linux-3.0.3/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57072 +++ linux-3.0.3/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57073 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57074 * kernel image and loaded module images reside. This is required
57075 * so x86_64 can correctly handle the %rip-relative fixups.
57076 */
57077 - kip->insns = module_alloc(PAGE_SIZE);
57078 + kip->insns = module_alloc_exec(PAGE_SIZE);
57079 if (!kip->insns) {
57080 kfree(kip);
57081 return NULL;
57082 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57083 */
57084 if (!list_is_singular(&kip->list)) {
57085 list_del(&kip->list);
57086 - module_free(NULL, kip->insns);
57087 + module_free_exec(NULL, kip->insns);
57088 kfree(kip);
57089 }
57090 return 1;
57091 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57092 {
57093 int i, err = 0;
57094 unsigned long offset = 0, size = 0;
57095 - char *modname, namebuf[128];
57096 + char *modname, namebuf[KSYM_NAME_LEN];
57097 const char *symbol_name;
57098 void *addr;
57099 struct kprobe_blackpoint *kb;
57100 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57101 const char *sym = NULL;
57102 unsigned int i = *(loff_t *) v;
57103 unsigned long offset = 0;
57104 - char *modname, namebuf[128];
57105 + char *modname, namebuf[KSYM_NAME_LEN];
57106
57107 head = &kprobe_table[i];
57108 preempt_disable();
57109 diff -urNp linux-3.0.3/kernel/lockdep.c linux-3.0.3/kernel/lockdep.c
57110 --- linux-3.0.3/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57111 +++ linux-3.0.3/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57112 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
57113 end = (unsigned long) &_end,
57114 addr = (unsigned long) obj;
57115
57116 +#ifdef CONFIG_PAX_KERNEXEC
57117 + start = ktla_ktva(start);
57118 +#endif
57119 +
57120 /*
57121 * static variable?
57122 */
57123 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57124 if (!static_obj(lock->key)) {
57125 debug_locks_off();
57126 printk("INFO: trying to register non-static key.\n");
57127 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57128 printk("the code is fine but needs lockdep annotation.\n");
57129 printk("turning off the locking correctness validator.\n");
57130 dump_stack();
57131 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57132 if (!class)
57133 return 0;
57134 }
57135 - atomic_inc((atomic_t *)&class->ops);
57136 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57137 if (very_verbose(class)) {
57138 printk("\nacquire class [%p] %s", class->key, class->name);
57139 if (class->name_version > 1)
57140 diff -urNp linux-3.0.3/kernel/lockdep_proc.c linux-3.0.3/kernel/lockdep_proc.c
57141 --- linux-3.0.3/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57142 +++ linux-3.0.3/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57143 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57144
57145 static void print_name(struct seq_file *m, struct lock_class *class)
57146 {
57147 - char str[128];
57148 + char str[KSYM_NAME_LEN];
57149 const char *name = class->name;
57150
57151 if (!name) {
57152 diff -urNp linux-3.0.3/kernel/module.c linux-3.0.3/kernel/module.c
57153 --- linux-3.0.3/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57154 +++ linux-3.0.3/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57155 @@ -58,6 +58,7 @@
57156 #include <linux/jump_label.h>
57157 #include <linux/pfn.h>
57158 #include <linux/bsearch.h>
57159 +#include <linux/grsecurity.h>
57160
57161 #define CREATE_TRACE_POINTS
57162 #include <trace/events/module.h>
57163 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57164
57165 /* Bounds of module allocation, for speeding __module_address.
57166 * Protected by module_mutex. */
57167 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57168 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57169 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57170
57171 int register_module_notifier(struct notifier_block * nb)
57172 {
57173 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57174 return true;
57175
57176 list_for_each_entry_rcu(mod, &modules, list) {
57177 - struct symsearch arr[] = {
57178 + struct symsearch modarr[] = {
57179 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57180 NOT_GPL_ONLY, false },
57181 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57182 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57183 #endif
57184 };
57185
57186 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57187 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57188 return true;
57189 }
57190 return false;
57191 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57192 static int percpu_modalloc(struct module *mod,
57193 unsigned long size, unsigned long align)
57194 {
57195 - if (align > PAGE_SIZE) {
57196 + if (align-1 >= PAGE_SIZE) {
57197 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57198 mod->name, align, PAGE_SIZE);
57199 align = PAGE_SIZE;
57200 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57201 */
57202 #ifdef CONFIG_SYSFS
57203
57204 -#ifdef CONFIG_KALLSYMS
57205 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57206 static inline bool sect_empty(const Elf_Shdr *sect)
57207 {
57208 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57209 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57210
57211 static void unset_module_core_ro_nx(struct module *mod)
57212 {
57213 - set_page_attributes(mod->module_core + mod->core_text_size,
57214 - mod->module_core + mod->core_size,
57215 + set_page_attributes(mod->module_core_rw,
57216 + mod->module_core_rw + mod->core_size_rw,
57217 set_memory_x);
57218 - set_page_attributes(mod->module_core,
57219 - mod->module_core + mod->core_ro_size,
57220 + set_page_attributes(mod->module_core_rx,
57221 + mod->module_core_rx + mod->core_size_rx,
57222 set_memory_rw);
57223 }
57224
57225 static void unset_module_init_ro_nx(struct module *mod)
57226 {
57227 - set_page_attributes(mod->module_init + mod->init_text_size,
57228 - mod->module_init + mod->init_size,
57229 + set_page_attributes(mod->module_init_rw,
57230 + mod->module_init_rw + mod->init_size_rw,
57231 set_memory_x);
57232 - set_page_attributes(mod->module_init,
57233 - mod->module_init + mod->init_ro_size,
57234 + set_page_attributes(mod->module_init_rx,
57235 + mod->module_init_rx + mod->init_size_rx,
57236 set_memory_rw);
57237 }
57238
57239 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57240
57241 mutex_lock(&module_mutex);
57242 list_for_each_entry_rcu(mod, &modules, list) {
57243 - if ((mod->module_core) && (mod->core_text_size)) {
57244 - set_page_attributes(mod->module_core,
57245 - mod->module_core + mod->core_text_size,
57246 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57247 + set_page_attributes(mod->module_core_rx,
57248 + mod->module_core_rx + mod->core_size_rx,
57249 set_memory_rw);
57250 }
57251 - if ((mod->module_init) && (mod->init_text_size)) {
57252 - set_page_attributes(mod->module_init,
57253 - mod->module_init + mod->init_text_size,
57254 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57255 + set_page_attributes(mod->module_init_rx,
57256 + mod->module_init_rx + mod->init_size_rx,
57257 set_memory_rw);
57258 }
57259 }
57260 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57261
57262 mutex_lock(&module_mutex);
57263 list_for_each_entry_rcu(mod, &modules, list) {
57264 - if ((mod->module_core) && (mod->core_text_size)) {
57265 - set_page_attributes(mod->module_core,
57266 - mod->module_core + mod->core_text_size,
57267 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57268 + set_page_attributes(mod->module_core_rx,
57269 + mod->module_core_rx + mod->core_size_rx,
57270 set_memory_ro);
57271 }
57272 - if ((mod->module_init) && (mod->init_text_size)) {
57273 - set_page_attributes(mod->module_init,
57274 - mod->module_init + mod->init_text_size,
57275 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57276 + set_page_attributes(mod->module_init_rx,
57277 + mod->module_init_rx + mod->init_size_rx,
57278 set_memory_ro);
57279 }
57280 }
57281 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57282
57283 /* This may be NULL, but that's OK */
57284 unset_module_init_ro_nx(mod);
57285 - module_free(mod, mod->module_init);
57286 + module_free(mod, mod->module_init_rw);
57287 + module_free_exec(mod, mod->module_init_rx);
57288 kfree(mod->args);
57289 percpu_modfree(mod);
57290
57291 /* Free lock-classes: */
57292 - lockdep_free_key_range(mod->module_core, mod->core_size);
57293 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57294 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57295
57296 /* Finally, free the core (containing the module structure) */
57297 unset_module_core_ro_nx(mod);
57298 - module_free(mod, mod->module_core);
57299 + module_free_exec(mod, mod->module_core_rx);
57300 + module_free(mod, mod->module_core_rw);
57301
57302 #ifdef CONFIG_MPU
57303 update_protections(current->mm);
57304 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57305 unsigned int i;
57306 int ret = 0;
57307 const struct kernel_symbol *ksym;
57308 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57309 + int is_fs_load = 0;
57310 + int register_filesystem_found = 0;
57311 + char *p;
57312 +
57313 + p = strstr(mod->args, "grsec_modharden_fs");
57314 + if (p) {
57315 + char *endptr = p + strlen("grsec_modharden_fs");
57316 + /* copy \0 as well */
57317 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57318 + is_fs_load = 1;
57319 + }
57320 +#endif
57321
57322 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57323 const char *name = info->strtab + sym[i].st_name;
57324
57325 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57326 + /* it's a real shame this will never get ripped and copied
57327 + upstream! ;(
57328 + */
57329 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57330 + register_filesystem_found = 1;
57331 +#endif
57332 +
57333 switch (sym[i].st_shndx) {
57334 case SHN_COMMON:
57335 /* We compiled with -fno-common. These are not
57336 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57337 ksym = resolve_symbol_wait(mod, info, name);
57338 /* Ok if resolved. */
57339 if (ksym && !IS_ERR(ksym)) {
57340 + pax_open_kernel();
57341 sym[i].st_value = ksym->value;
57342 + pax_close_kernel();
57343 break;
57344 }
57345
57346 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57347 secbase = (unsigned long)mod_percpu(mod);
57348 else
57349 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57350 + pax_open_kernel();
57351 sym[i].st_value += secbase;
57352 + pax_close_kernel();
57353 break;
57354 }
57355 }
57356
57357 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57358 + if (is_fs_load && !register_filesystem_found) {
57359 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57360 + ret = -EPERM;
57361 + }
57362 +#endif
57363 +
57364 return ret;
57365 }
57366
57367 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57368 || s->sh_entsize != ~0UL
57369 || strstarts(sname, ".init"))
57370 continue;
57371 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57372 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57373 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57374 + else
57375 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57376 DEBUGP("\t%s\n", name);
57377 }
57378 - switch (m) {
57379 - case 0: /* executable */
57380 - mod->core_size = debug_align(mod->core_size);
57381 - mod->core_text_size = mod->core_size;
57382 - break;
57383 - case 1: /* RO: text and ro-data */
57384 - mod->core_size = debug_align(mod->core_size);
57385 - mod->core_ro_size = mod->core_size;
57386 - break;
57387 - case 3: /* whole core */
57388 - mod->core_size = debug_align(mod->core_size);
57389 - break;
57390 - }
57391 }
57392
57393 DEBUGP("Init section allocation order:\n");
57394 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57395 || s->sh_entsize != ~0UL
57396 || !strstarts(sname, ".init"))
57397 continue;
57398 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57399 - | INIT_OFFSET_MASK);
57400 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57401 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57402 + else
57403 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57404 + s->sh_entsize |= INIT_OFFSET_MASK;
57405 DEBUGP("\t%s\n", sname);
57406 }
57407 - switch (m) {
57408 - case 0: /* executable */
57409 - mod->init_size = debug_align(mod->init_size);
57410 - mod->init_text_size = mod->init_size;
57411 - break;
57412 - case 1: /* RO: text and ro-data */
57413 - mod->init_size = debug_align(mod->init_size);
57414 - mod->init_ro_size = mod->init_size;
57415 - break;
57416 - case 3: /* whole init */
57417 - mod->init_size = debug_align(mod->init_size);
57418 - break;
57419 - }
57420 }
57421 }
57422
57423 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57424
57425 /* Put symbol section at end of init part of module. */
57426 symsect->sh_flags |= SHF_ALLOC;
57427 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57428 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57429 info->index.sym) | INIT_OFFSET_MASK;
57430 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57431
57432 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57433 }
57434
57435 /* Append room for core symbols at end of core part. */
57436 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57437 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57438 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57439 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57440
57441 /* Put string table section at end of init part of module. */
57442 strsect->sh_flags |= SHF_ALLOC;
57443 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57444 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57445 info->index.str) | INIT_OFFSET_MASK;
57446 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57447
57448 /* Append room for core symbols' strings at end of core part. */
57449 - info->stroffs = mod->core_size;
57450 + info->stroffs = mod->core_size_rx;
57451 __set_bit(0, info->strmap);
57452 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57453 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57454 }
57455
57456 static void add_kallsyms(struct module *mod, const struct load_info *info)
57457 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57458 /* Make sure we get permanent strtab: don't use info->strtab. */
57459 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57460
57461 + pax_open_kernel();
57462 +
57463 /* Set types up while we still have access to sections. */
57464 for (i = 0; i < mod->num_symtab; i++)
57465 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57466
57467 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57468 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57469 src = mod->symtab;
57470 *dst = *src;
57471 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57472 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57473 }
57474 mod->core_num_syms = ndst;
57475
57476 - mod->core_strtab = s = mod->module_core + info->stroffs;
57477 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57478 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57479 if (test_bit(i, info->strmap))
57480 *++s = mod->strtab[i];
57481 +
57482 + pax_close_kernel();
57483 }
57484 #else
57485 static inline void layout_symtab(struct module *mod, struct load_info *info)
57486 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57487 ddebug_remove_module(debug->modname);
57488 }
57489
57490 -static void *module_alloc_update_bounds(unsigned long size)
57491 +static void *module_alloc_update_bounds_rw(unsigned long size)
57492 {
57493 void *ret = module_alloc(size);
57494
57495 if (ret) {
57496 mutex_lock(&module_mutex);
57497 /* Update module bounds. */
57498 - if ((unsigned long)ret < module_addr_min)
57499 - module_addr_min = (unsigned long)ret;
57500 - if ((unsigned long)ret + size > module_addr_max)
57501 - module_addr_max = (unsigned long)ret + size;
57502 + if ((unsigned long)ret < module_addr_min_rw)
57503 + module_addr_min_rw = (unsigned long)ret;
57504 + if ((unsigned long)ret + size > module_addr_max_rw)
57505 + module_addr_max_rw = (unsigned long)ret + size;
57506 + mutex_unlock(&module_mutex);
57507 + }
57508 + return ret;
57509 +}
57510 +
57511 +static void *module_alloc_update_bounds_rx(unsigned long size)
57512 +{
57513 + void *ret = module_alloc_exec(size);
57514 +
57515 + if (ret) {
57516 + mutex_lock(&module_mutex);
57517 + /* Update module bounds. */
57518 + if ((unsigned long)ret < module_addr_min_rx)
57519 + module_addr_min_rx = (unsigned long)ret;
57520 + if ((unsigned long)ret + size > module_addr_max_rx)
57521 + module_addr_max_rx = (unsigned long)ret + size;
57522 mutex_unlock(&module_mutex);
57523 }
57524 return ret;
57525 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57526 void *ptr;
57527
57528 /* Do the allocs. */
57529 - ptr = module_alloc_update_bounds(mod->core_size);
57530 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57531 /*
57532 * The pointer to this block is stored in the module structure
57533 * which is inside the block. Just mark it as not being a
57534 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57535 if (!ptr)
57536 return -ENOMEM;
57537
57538 - memset(ptr, 0, mod->core_size);
57539 - mod->module_core = ptr;
57540 + memset(ptr, 0, mod->core_size_rw);
57541 + mod->module_core_rw = ptr;
57542
57543 - ptr = module_alloc_update_bounds(mod->init_size);
57544 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57545 /*
57546 * The pointer to this block is stored in the module structure
57547 * which is inside the block. This block doesn't need to be
57548 * scanned as it contains data and code that will be freed
57549 * after the module is initialized.
57550 */
57551 - kmemleak_ignore(ptr);
57552 - if (!ptr && mod->init_size) {
57553 - module_free(mod, mod->module_core);
57554 + kmemleak_not_leak(ptr);
57555 + if (!ptr && mod->init_size_rw) {
57556 + module_free(mod, mod->module_core_rw);
57557 return -ENOMEM;
57558 }
57559 - memset(ptr, 0, mod->init_size);
57560 - mod->module_init = ptr;
57561 + memset(ptr, 0, mod->init_size_rw);
57562 + mod->module_init_rw = ptr;
57563 +
57564 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57565 + kmemleak_not_leak(ptr);
57566 + if (!ptr) {
57567 + module_free(mod, mod->module_init_rw);
57568 + module_free(mod, mod->module_core_rw);
57569 + return -ENOMEM;
57570 + }
57571 +
57572 + pax_open_kernel();
57573 + memset(ptr, 0, mod->core_size_rx);
57574 + pax_close_kernel();
57575 + mod->module_core_rx = ptr;
57576 +
57577 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57578 + kmemleak_not_leak(ptr);
57579 + if (!ptr && mod->init_size_rx) {
57580 + module_free_exec(mod, mod->module_core_rx);
57581 + module_free(mod, mod->module_init_rw);
57582 + module_free(mod, mod->module_core_rw);
57583 + return -ENOMEM;
57584 + }
57585 +
57586 + pax_open_kernel();
57587 + memset(ptr, 0, mod->init_size_rx);
57588 + pax_close_kernel();
57589 + mod->module_init_rx = ptr;
57590
57591 /* Transfer each section which specifies SHF_ALLOC */
57592 DEBUGP("final section addresses:\n");
57593 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57594 if (!(shdr->sh_flags & SHF_ALLOC))
57595 continue;
57596
57597 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
57598 - dest = mod->module_init
57599 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57600 - else
57601 - dest = mod->module_core + shdr->sh_entsize;
57602 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57603 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57604 + dest = mod->module_init_rw
57605 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57606 + else
57607 + dest = mod->module_init_rx
57608 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57609 + } else {
57610 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57611 + dest = mod->module_core_rw + shdr->sh_entsize;
57612 + else
57613 + dest = mod->module_core_rx + shdr->sh_entsize;
57614 + }
57615 +
57616 + if (shdr->sh_type != SHT_NOBITS) {
57617 +
57618 +#ifdef CONFIG_PAX_KERNEXEC
57619 +#ifdef CONFIG_X86_64
57620 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57621 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57622 +#endif
57623 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57624 + pax_open_kernel();
57625 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57626 + pax_close_kernel();
57627 + } else
57628 +#endif
57629
57630 - if (shdr->sh_type != SHT_NOBITS)
57631 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57632 + }
57633 /* Update sh_addr to point to copy in image. */
57634 - shdr->sh_addr = (unsigned long)dest;
57635 +
57636 +#ifdef CONFIG_PAX_KERNEXEC
57637 + if (shdr->sh_flags & SHF_EXECINSTR)
57638 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
57639 + else
57640 +#endif
57641 +
57642 + shdr->sh_addr = (unsigned long)dest;
57643 DEBUGP("\t0x%lx %s\n",
57644 shdr->sh_addr, info->secstrings + shdr->sh_name);
57645 }
57646 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57647 * Do it before processing of module parameters, so the module
57648 * can provide parameter accessor functions of its own.
57649 */
57650 - if (mod->module_init)
57651 - flush_icache_range((unsigned long)mod->module_init,
57652 - (unsigned long)mod->module_init
57653 - + mod->init_size);
57654 - flush_icache_range((unsigned long)mod->module_core,
57655 - (unsigned long)mod->module_core + mod->core_size);
57656 + if (mod->module_init_rx)
57657 + flush_icache_range((unsigned long)mod->module_init_rx,
57658 + (unsigned long)mod->module_init_rx
57659 + + mod->init_size_rx);
57660 + flush_icache_range((unsigned long)mod->module_core_rx,
57661 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
57662
57663 set_fs(old_fs);
57664 }
57665 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57666 {
57667 kfree(info->strmap);
57668 percpu_modfree(mod);
57669 - module_free(mod, mod->module_init);
57670 - module_free(mod, mod->module_core);
57671 + module_free_exec(mod, mod->module_init_rx);
57672 + module_free_exec(mod, mod->module_core_rx);
57673 + module_free(mod, mod->module_init_rw);
57674 + module_free(mod, mod->module_core_rw);
57675 }
57676
57677 static int post_relocation(struct module *mod, const struct load_info *info)
57678 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57679 if (err)
57680 goto free_unload;
57681
57682 + /* Now copy in args */
57683 + mod->args = strndup_user(uargs, ~0UL >> 1);
57684 + if (IS_ERR(mod->args)) {
57685 + err = PTR_ERR(mod->args);
57686 + goto free_unload;
57687 + }
57688 +
57689 /* Set up MODINFO_ATTR fields */
57690 setup_modinfo(mod, &info);
57691
57692 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57693 + {
57694 + char *p, *p2;
57695 +
57696 + if (strstr(mod->args, "grsec_modharden_netdev")) {
57697 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57698 + err = -EPERM;
57699 + goto free_modinfo;
57700 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57701 + p += strlen("grsec_modharden_normal");
57702 + p2 = strstr(p, "_");
57703 + if (p2) {
57704 + *p2 = '\0';
57705 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
57706 + *p2 = '_';
57707 + }
57708 + err = -EPERM;
57709 + goto free_modinfo;
57710 + }
57711 + }
57712 +#endif
57713 +
57714 /* Fix up syms, so that st_value is a pointer to location. */
57715 err = simplify_symbols(mod, &info);
57716 if (err < 0)
57717 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
57718
57719 flush_module_icache(mod);
57720
57721 - /* Now copy in args */
57722 - mod->args = strndup_user(uargs, ~0UL >> 1);
57723 - if (IS_ERR(mod->args)) {
57724 - err = PTR_ERR(mod->args);
57725 - goto free_arch_cleanup;
57726 - }
57727 -
57728 /* Mark state as coming so strong_try_module_get() ignores us. */
57729 mod->state = MODULE_STATE_COMING;
57730
57731 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
57732 unlock:
57733 mutex_unlock(&module_mutex);
57734 synchronize_sched();
57735 - kfree(mod->args);
57736 - free_arch_cleanup:
57737 module_arch_cleanup(mod);
57738 free_modinfo:
57739 free_modinfo(mod);
57740 + kfree(mod->args);
57741 free_unload:
57742 module_unload_free(mod);
57743 free_module:
57744 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
57745 MODULE_STATE_COMING, mod);
57746
57747 /* Set RO and NX regions for core */
57748 - set_section_ro_nx(mod->module_core,
57749 - mod->core_text_size,
57750 - mod->core_ro_size,
57751 - mod->core_size);
57752 + set_section_ro_nx(mod->module_core_rx,
57753 + mod->core_size_rx,
57754 + mod->core_size_rx,
57755 + mod->core_size_rx);
57756
57757 /* Set RO and NX regions for init */
57758 - set_section_ro_nx(mod->module_init,
57759 - mod->init_text_size,
57760 - mod->init_ro_size,
57761 - mod->init_size);
57762 + set_section_ro_nx(mod->module_init_rx,
57763 + mod->init_size_rx,
57764 + mod->init_size_rx,
57765 + mod->init_size_rx);
57766
57767 do_mod_ctors(mod);
57768 /* Start the module */
57769 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
57770 mod->strtab = mod->core_strtab;
57771 #endif
57772 unset_module_init_ro_nx(mod);
57773 - module_free(mod, mod->module_init);
57774 - mod->module_init = NULL;
57775 - mod->init_size = 0;
57776 - mod->init_ro_size = 0;
57777 - mod->init_text_size = 0;
57778 + module_free(mod, mod->module_init_rw);
57779 + module_free_exec(mod, mod->module_init_rx);
57780 + mod->module_init_rw = NULL;
57781 + mod->module_init_rx = NULL;
57782 + mod->init_size_rw = 0;
57783 + mod->init_size_rx = 0;
57784 mutex_unlock(&module_mutex);
57785
57786 return 0;
57787 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
57788 unsigned long nextval;
57789
57790 /* At worse, next value is at end of module */
57791 - if (within_module_init(addr, mod))
57792 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
57793 + if (within_module_init_rx(addr, mod))
57794 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
57795 + else if (within_module_init_rw(addr, mod))
57796 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
57797 + else if (within_module_core_rx(addr, mod))
57798 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
57799 + else if (within_module_core_rw(addr, mod))
57800 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
57801 else
57802 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
57803 + return NULL;
57804
57805 /* Scan for closest preceding symbol, and next symbol. (ELF
57806 starts real symbols at 1). */
57807 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
57808 char buf[8];
57809
57810 seq_printf(m, "%s %u",
57811 - mod->name, mod->init_size + mod->core_size);
57812 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
57813 print_unload_info(m, mod);
57814
57815 /* Informative for users. */
57816 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
57817 mod->state == MODULE_STATE_COMING ? "Loading":
57818 "Live");
57819 /* Used by oprofile and other similar tools. */
57820 - seq_printf(m, " 0x%pK", mod->module_core);
57821 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
57822
57823 /* Taints info */
57824 if (mod->taints)
57825 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
57826
57827 static int __init proc_modules_init(void)
57828 {
57829 +#ifndef CONFIG_GRKERNSEC_HIDESYM
57830 +#ifdef CONFIG_GRKERNSEC_PROC_USER
57831 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
57832 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57833 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
57834 +#else
57835 proc_create("modules", 0, NULL, &proc_modules_operations);
57836 +#endif
57837 +#else
57838 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
57839 +#endif
57840 return 0;
57841 }
57842 module_init(proc_modules_init);
57843 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
57844 {
57845 struct module *mod;
57846
57847 - if (addr < module_addr_min || addr > module_addr_max)
57848 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
57849 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
57850 return NULL;
57851
57852 list_for_each_entry_rcu(mod, &modules, list)
57853 - if (within_module_core(addr, mod)
57854 - || within_module_init(addr, mod))
57855 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
57856 return mod;
57857 return NULL;
57858 }
57859 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
57860 */
57861 struct module *__module_text_address(unsigned long addr)
57862 {
57863 - struct module *mod = __module_address(addr);
57864 + struct module *mod;
57865 +
57866 +#ifdef CONFIG_X86_32
57867 + addr = ktla_ktva(addr);
57868 +#endif
57869 +
57870 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
57871 + return NULL;
57872 +
57873 + mod = __module_address(addr);
57874 +
57875 if (mod) {
57876 /* Make sure it's within the text section. */
57877 - if (!within(addr, mod->module_init, mod->init_text_size)
57878 - && !within(addr, mod->module_core, mod->core_text_size))
57879 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
57880 mod = NULL;
57881 }
57882 return mod;
57883 diff -urNp linux-3.0.3/kernel/mutex.c linux-3.0.3/kernel/mutex.c
57884 --- linux-3.0.3/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
57885 +++ linux-3.0.3/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
57886 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
57887 spin_lock_mutex(&lock->wait_lock, flags);
57888
57889 debug_mutex_lock_common(lock, &waiter);
57890 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
57891 + debug_mutex_add_waiter(lock, &waiter, task);
57892
57893 /* add waiting tasks to the end of the waitqueue (FIFO): */
57894 list_add_tail(&waiter.list, &lock->wait_list);
57895 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
57896 * TASK_UNINTERRUPTIBLE case.)
57897 */
57898 if (unlikely(signal_pending_state(state, task))) {
57899 - mutex_remove_waiter(lock, &waiter,
57900 - task_thread_info(task));
57901 + mutex_remove_waiter(lock, &waiter, task);
57902 mutex_release(&lock->dep_map, 1, ip);
57903 spin_unlock_mutex(&lock->wait_lock, flags);
57904
57905 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
57906 done:
57907 lock_acquired(&lock->dep_map, ip);
57908 /* got the lock - rejoice! */
57909 - mutex_remove_waiter(lock, &waiter, current_thread_info());
57910 + mutex_remove_waiter(lock, &waiter, task);
57911 mutex_set_owner(lock);
57912
57913 /* set it to 0 if there are no waiters left: */
57914 diff -urNp linux-3.0.3/kernel/mutex-debug.c linux-3.0.3/kernel/mutex-debug.c
57915 --- linux-3.0.3/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
57916 +++ linux-3.0.3/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
57917 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
57918 }
57919
57920 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
57921 - struct thread_info *ti)
57922 + struct task_struct *task)
57923 {
57924 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
57925
57926 /* Mark the current thread as blocked on the lock: */
57927 - ti->task->blocked_on = waiter;
57928 + task->blocked_on = waiter;
57929 }
57930
57931 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
57932 - struct thread_info *ti)
57933 + struct task_struct *task)
57934 {
57935 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
57936 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
57937 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
57938 - ti->task->blocked_on = NULL;
57939 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
57940 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
57941 + task->blocked_on = NULL;
57942
57943 list_del_init(&waiter->list);
57944 waiter->task = NULL;
57945 diff -urNp linux-3.0.3/kernel/mutex-debug.h linux-3.0.3/kernel/mutex-debug.h
57946 --- linux-3.0.3/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
57947 +++ linux-3.0.3/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
57948 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
57949 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
57950 extern void debug_mutex_add_waiter(struct mutex *lock,
57951 struct mutex_waiter *waiter,
57952 - struct thread_info *ti);
57953 + struct task_struct *task);
57954 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
57955 - struct thread_info *ti);
57956 + struct task_struct *task);
57957 extern void debug_mutex_unlock(struct mutex *lock);
57958 extern void debug_mutex_init(struct mutex *lock, const char *name,
57959 struct lock_class_key *key);
57960 diff -urNp linux-3.0.3/kernel/padata.c linux-3.0.3/kernel/padata.c
57961 --- linux-3.0.3/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
57962 +++ linux-3.0.3/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
57963 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
57964 padata->pd = pd;
57965 padata->cb_cpu = cb_cpu;
57966
57967 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
57968 - atomic_set(&pd->seq_nr, -1);
57969 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
57970 + atomic_set_unchecked(&pd->seq_nr, -1);
57971
57972 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
57973 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
57974
57975 target_cpu = padata_cpu_hash(padata);
57976 queue = per_cpu_ptr(pd->pqueue, target_cpu);
57977 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
57978 padata_init_pqueues(pd);
57979 padata_init_squeues(pd);
57980 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
57981 - atomic_set(&pd->seq_nr, -1);
57982 + atomic_set_unchecked(&pd->seq_nr, -1);
57983 atomic_set(&pd->reorder_objects, 0);
57984 atomic_set(&pd->refcnt, 0);
57985 pd->pinst = pinst;
57986 diff -urNp linux-3.0.3/kernel/panic.c linux-3.0.3/kernel/panic.c
57987 --- linux-3.0.3/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
57988 +++ linux-3.0.3/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
57989 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
57990 const char *board;
57991
57992 printk(KERN_WARNING "------------[ cut here ]------------\n");
57993 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
57994 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
57995 board = dmi_get_system_info(DMI_PRODUCT_NAME);
57996 if (board)
57997 printk(KERN_WARNING "Hardware name: %s\n", board);
57998 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
57999 */
58000 void __stack_chk_fail(void)
58001 {
58002 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58003 + dump_stack();
58004 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58005 __builtin_return_address(0));
58006 }
58007 EXPORT_SYMBOL(__stack_chk_fail);
58008 diff -urNp linux-3.0.3/kernel/pid.c linux-3.0.3/kernel/pid.c
58009 --- linux-3.0.3/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58010 +++ linux-3.0.3/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58011 @@ -33,6 +33,7 @@
58012 #include <linux/rculist.h>
58013 #include <linux/bootmem.h>
58014 #include <linux/hash.h>
58015 +#include <linux/security.h>
58016 #include <linux/pid_namespace.h>
58017 #include <linux/init_task.h>
58018 #include <linux/syscalls.h>
58019 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58020
58021 int pid_max = PID_MAX_DEFAULT;
58022
58023 -#define RESERVED_PIDS 300
58024 +#define RESERVED_PIDS 500
58025
58026 int pid_max_min = RESERVED_PIDS + 1;
58027 int pid_max_max = PID_MAX_LIMIT;
58028 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58029 */
58030 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58031 {
58032 + struct task_struct *task;
58033 +
58034 rcu_lockdep_assert(rcu_read_lock_held());
58035 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58036 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58037 +
58038 + if (gr_pid_is_chrooted(task))
58039 + return NULL;
58040 +
58041 + return task;
58042 }
58043
58044 struct task_struct *find_task_by_vpid(pid_t vnr)
58045 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58046 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58047 }
58048
58049 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58050 +{
58051 + rcu_lockdep_assert(rcu_read_lock_held());
58052 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58053 +}
58054 +
58055 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58056 {
58057 struct pid *pid;
58058 diff -urNp linux-3.0.3/kernel/posix-cpu-timers.c linux-3.0.3/kernel/posix-cpu-timers.c
58059 --- linux-3.0.3/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58060 +++ linux-3.0.3/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58061 @@ -6,6 +6,7 @@
58062 #include <linux/posix-timers.h>
58063 #include <linux/errno.h>
58064 #include <linux/math64.h>
58065 +#include <linux/security.h>
58066 #include <asm/uaccess.h>
58067 #include <linux/kernel_stat.h>
58068 #include <trace/events/timer.h>
58069 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58070
58071 static __init int init_posix_cpu_timers(void)
58072 {
58073 - struct k_clock process = {
58074 + static struct k_clock process = {
58075 .clock_getres = process_cpu_clock_getres,
58076 .clock_get = process_cpu_clock_get,
58077 .timer_create = process_cpu_timer_create,
58078 .nsleep = process_cpu_nsleep,
58079 .nsleep_restart = process_cpu_nsleep_restart,
58080 };
58081 - struct k_clock thread = {
58082 + static struct k_clock thread = {
58083 .clock_getres = thread_cpu_clock_getres,
58084 .clock_get = thread_cpu_clock_get,
58085 .timer_create = thread_cpu_timer_create,
58086 diff -urNp linux-3.0.3/kernel/posix-timers.c linux-3.0.3/kernel/posix-timers.c
58087 --- linux-3.0.3/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58088 +++ linux-3.0.3/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58089 @@ -43,6 +43,7 @@
58090 #include <linux/idr.h>
58091 #include <linux/posix-clock.h>
58092 #include <linux/posix-timers.h>
58093 +#include <linux/grsecurity.h>
58094 #include <linux/syscalls.h>
58095 #include <linux/wait.h>
58096 #include <linux/workqueue.h>
58097 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58098 * which we beg off on and pass to do_sys_settimeofday().
58099 */
58100
58101 -static struct k_clock posix_clocks[MAX_CLOCKS];
58102 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58103
58104 /*
58105 * These ones are defined below.
58106 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58107 */
58108 static __init int init_posix_timers(void)
58109 {
58110 - struct k_clock clock_realtime = {
58111 + static struct k_clock clock_realtime = {
58112 .clock_getres = hrtimer_get_res,
58113 .clock_get = posix_clock_realtime_get,
58114 .clock_set = posix_clock_realtime_set,
58115 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58116 .timer_get = common_timer_get,
58117 .timer_del = common_timer_del,
58118 };
58119 - struct k_clock clock_monotonic = {
58120 + static struct k_clock clock_monotonic = {
58121 .clock_getres = hrtimer_get_res,
58122 .clock_get = posix_ktime_get_ts,
58123 .nsleep = common_nsleep,
58124 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58125 .timer_get = common_timer_get,
58126 .timer_del = common_timer_del,
58127 };
58128 - struct k_clock clock_monotonic_raw = {
58129 + static struct k_clock clock_monotonic_raw = {
58130 .clock_getres = hrtimer_get_res,
58131 .clock_get = posix_get_monotonic_raw,
58132 };
58133 - struct k_clock clock_realtime_coarse = {
58134 + static struct k_clock clock_realtime_coarse = {
58135 .clock_getres = posix_get_coarse_res,
58136 .clock_get = posix_get_realtime_coarse,
58137 };
58138 - struct k_clock clock_monotonic_coarse = {
58139 + static struct k_clock clock_monotonic_coarse = {
58140 .clock_getres = posix_get_coarse_res,
58141 .clock_get = posix_get_monotonic_coarse,
58142 };
58143 - struct k_clock clock_boottime = {
58144 + static struct k_clock clock_boottime = {
58145 .clock_getres = hrtimer_get_res,
58146 .clock_get = posix_get_boottime,
58147 .nsleep = common_nsleep,
58148 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58149 .timer_del = common_timer_del,
58150 };
58151
58152 + pax_track_stack();
58153 +
58154 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58155 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58156 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58157 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58158 return;
58159 }
58160
58161 - posix_clocks[clock_id] = *new_clock;
58162 + posix_clocks[clock_id] = new_clock;
58163 }
58164 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58165
58166 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58167 return (id & CLOCKFD_MASK) == CLOCKFD ?
58168 &clock_posix_dynamic : &clock_posix_cpu;
58169
58170 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58171 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58172 return NULL;
58173 - return &posix_clocks[id];
58174 + return posix_clocks[id];
58175 }
58176
58177 static int common_timer_create(struct k_itimer *new_timer)
58178 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58179 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58180 return -EFAULT;
58181
58182 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58183 + have their clock_set fptr set to a nosettime dummy function
58184 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58185 + call common_clock_set, which calls do_sys_settimeofday, which
58186 + we hook
58187 + */
58188 +
58189 return kc->clock_set(which_clock, &new_tp);
58190 }
58191
58192 diff -urNp linux-3.0.3/kernel/power/poweroff.c linux-3.0.3/kernel/power/poweroff.c
58193 --- linux-3.0.3/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58194 +++ linux-3.0.3/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58195 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58196 .enable_mask = SYSRQ_ENABLE_BOOT,
58197 };
58198
58199 -static int pm_sysrq_init(void)
58200 +static int __init pm_sysrq_init(void)
58201 {
58202 register_sysrq_key('o', &sysrq_poweroff_op);
58203 return 0;
58204 diff -urNp linux-3.0.3/kernel/power/process.c linux-3.0.3/kernel/power/process.c
58205 --- linux-3.0.3/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58206 +++ linux-3.0.3/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58207 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58208 u64 elapsed_csecs64;
58209 unsigned int elapsed_csecs;
58210 bool wakeup = false;
58211 + bool timedout = false;
58212
58213 do_gettimeofday(&start);
58214
58215 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58216
58217 while (true) {
58218 todo = 0;
58219 + if (time_after(jiffies, end_time))
58220 + timedout = true;
58221 read_lock(&tasklist_lock);
58222 do_each_thread(g, p) {
58223 if (frozen(p) || !freezable(p))
58224 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58225 * try_to_stop() after schedule() in ptrace/signal
58226 * stop sees TIF_FREEZE.
58227 */
58228 - if (!task_is_stopped_or_traced(p) &&
58229 - !freezer_should_skip(p))
58230 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58231 todo++;
58232 + if (timedout) {
58233 + printk(KERN_ERR "Task refusing to freeze:\n");
58234 + sched_show_task(p);
58235 + }
58236 + }
58237 } while_each_thread(g, p);
58238 read_unlock(&tasklist_lock);
58239
58240 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58241 todo += wq_busy;
58242 }
58243
58244 - if (!todo || time_after(jiffies, end_time))
58245 + if (!todo || timedout)
58246 break;
58247
58248 if (pm_wakeup_pending()) {
58249 diff -urNp linux-3.0.3/kernel/printk.c linux-3.0.3/kernel/printk.c
58250 --- linux-3.0.3/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58251 +++ linux-3.0.3/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58252 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58253 if (from_file && type != SYSLOG_ACTION_OPEN)
58254 return 0;
58255
58256 +#ifdef CONFIG_GRKERNSEC_DMESG
58257 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58258 + return -EPERM;
58259 +#endif
58260 +
58261 if (syslog_action_restricted(type)) {
58262 if (capable(CAP_SYSLOG))
58263 return 0;
58264 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58265 if (capable(CAP_SYS_ADMIN)) {
58266 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58267 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58268 "but no CAP_SYSLOG (deprecated).\n");
58269 return 0;
58270 }
58271 diff -urNp linux-3.0.3/kernel/profile.c linux-3.0.3/kernel/profile.c
58272 --- linux-3.0.3/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58273 +++ linux-3.0.3/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58274 @@ -39,7 +39,7 @@ struct profile_hit {
58275 /* Oprofile timer tick hook */
58276 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58277
58278 -static atomic_t *prof_buffer;
58279 +static atomic_unchecked_t *prof_buffer;
58280 static unsigned long prof_len, prof_shift;
58281
58282 int prof_on __read_mostly;
58283 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58284 hits[i].pc = 0;
58285 continue;
58286 }
58287 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58288 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58289 hits[i].hits = hits[i].pc = 0;
58290 }
58291 }
58292 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58293 * Add the current hit(s) and flush the write-queue out
58294 * to the global buffer:
58295 */
58296 - atomic_add(nr_hits, &prof_buffer[pc]);
58297 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58298 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58299 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58300 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58301 hits[i].pc = hits[i].hits = 0;
58302 }
58303 out:
58304 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58305 {
58306 unsigned long pc;
58307 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58308 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58309 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58310 }
58311 #endif /* !CONFIG_SMP */
58312
58313 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58314 return -EFAULT;
58315 buf++; p++; count--; read++;
58316 }
58317 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58318 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58319 if (copy_to_user(buf, (void *)pnt, count))
58320 return -EFAULT;
58321 read += count;
58322 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58323 }
58324 #endif
58325 profile_discard_flip_buffers();
58326 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58327 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58328 return count;
58329 }
58330
58331 diff -urNp linux-3.0.3/kernel/ptrace.c linux-3.0.3/kernel/ptrace.c
58332 --- linux-3.0.3/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58333 +++ linux-3.0.3/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58334 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58335 return ret;
58336 }
58337
58338 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58339 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58340 + unsigned int log)
58341 {
58342 const struct cred *cred = current_cred(), *tcred;
58343
58344 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58345 cred->gid == tcred->sgid &&
58346 cred->gid == tcred->gid))
58347 goto ok;
58348 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58349 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58350 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58351 goto ok;
58352 rcu_read_unlock();
58353 return -EPERM;
58354 @@ -167,7 +169,9 @@ ok:
58355 smp_rmb();
58356 if (task->mm)
58357 dumpable = get_dumpable(task->mm);
58358 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58359 + if (!dumpable &&
58360 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58361 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58362 return -EPERM;
58363
58364 return security_ptrace_access_check(task, mode);
58365 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58366 {
58367 int err;
58368 task_lock(task);
58369 - err = __ptrace_may_access(task, mode);
58370 + err = __ptrace_may_access(task, mode, 0);
58371 + task_unlock(task);
58372 + return !err;
58373 +}
58374 +
58375 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58376 +{
58377 + int err;
58378 + task_lock(task);
58379 + err = __ptrace_may_access(task, mode, 1);
58380 task_unlock(task);
58381 return !err;
58382 }
58383 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58384 goto out;
58385
58386 task_lock(task);
58387 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58388 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58389 task_unlock(task);
58390 if (retval)
58391 goto unlock_creds;
58392 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58393 goto unlock_tasklist;
58394
58395 task->ptrace = PT_PTRACED;
58396 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58397 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58398 task->ptrace |= PT_PTRACE_CAP;
58399
58400 __ptrace_link(task, current);
58401 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58402 {
58403 int copied = 0;
58404
58405 + pax_track_stack();
58406 +
58407 while (len > 0) {
58408 char buf[128];
58409 int this_len, retval;
58410 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58411 break;
58412 return -EIO;
58413 }
58414 - if (copy_to_user(dst, buf, retval))
58415 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58416 return -EFAULT;
58417 copied += retval;
58418 src += retval;
58419 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58420 {
58421 int copied = 0;
58422
58423 + pax_track_stack();
58424 +
58425 while (len > 0) {
58426 char buf[128];
58427 int this_len, retval;
58428 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58429 {
58430 int ret = -EIO;
58431 siginfo_t siginfo;
58432 - void __user *datavp = (void __user *) data;
58433 + void __user *datavp = (__force void __user *) data;
58434 unsigned long __user *datalp = datavp;
58435
58436 + pax_track_stack();
58437 +
58438 switch (request) {
58439 case PTRACE_PEEKTEXT:
58440 case PTRACE_PEEKDATA:
58441 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58442 goto out;
58443 }
58444
58445 + if (gr_handle_ptrace(child, request)) {
58446 + ret = -EPERM;
58447 + goto out_put_task_struct;
58448 + }
58449 +
58450 if (request == PTRACE_ATTACH) {
58451 ret = ptrace_attach(child);
58452 /*
58453 * Some architectures need to do book-keeping after
58454 * a ptrace attach.
58455 */
58456 - if (!ret)
58457 + if (!ret) {
58458 arch_ptrace_attach(child);
58459 + gr_audit_ptrace(child);
58460 + }
58461 goto out_put_task_struct;
58462 }
58463
58464 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58465 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58466 if (copied != sizeof(tmp))
58467 return -EIO;
58468 - return put_user(tmp, (unsigned long __user *)data);
58469 + return put_user(tmp, (__force unsigned long __user *)data);
58470 }
58471
58472 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58473 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58474 siginfo_t siginfo;
58475 int ret;
58476
58477 + pax_track_stack();
58478 +
58479 switch (request) {
58480 case PTRACE_PEEKTEXT:
58481 case PTRACE_PEEKDATA:
58482 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58483 goto out;
58484 }
58485
58486 + if (gr_handle_ptrace(child, request)) {
58487 + ret = -EPERM;
58488 + goto out_put_task_struct;
58489 + }
58490 +
58491 if (request == PTRACE_ATTACH) {
58492 ret = ptrace_attach(child);
58493 /*
58494 * Some architectures need to do book-keeping after
58495 * a ptrace attach.
58496 */
58497 - if (!ret)
58498 + if (!ret) {
58499 arch_ptrace_attach(child);
58500 + gr_audit_ptrace(child);
58501 + }
58502 goto out_put_task_struct;
58503 }
58504
58505 diff -urNp linux-3.0.3/kernel/rcutorture.c linux-3.0.3/kernel/rcutorture.c
58506 --- linux-3.0.3/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58507 +++ linux-3.0.3/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58508 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58509 { 0 };
58510 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58511 { 0 };
58512 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58513 -static atomic_t n_rcu_torture_alloc;
58514 -static atomic_t n_rcu_torture_alloc_fail;
58515 -static atomic_t n_rcu_torture_free;
58516 -static atomic_t n_rcu_torture_mberror;
58517 -static atomic_t n_rcu_torture_error;
58518 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58519 +static atomic_unchecked_t n_rcu_torture_alloc;
58520 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
58521 +static atomic_unchecked_t n_rcu_torture_free;
58522 +static atomic_unchecked_t n_rcu_torture_mberror;
58523 +static atomic_unchecked_t n_rcu_torture_error;
58524 static long n_rcu_torture_boost_ktrerror;
58525 static long n_rcu_torture_boost_rterror;
58526 static long n_rcu_torture_boost_failure;
58527 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58528
58529 spin_lock_bh(&rcu_torture_lock);
58530 if (list_empty(&rcu_torture_freelist)) {
58531 - atomic_inc(&n_rcu_torture_alloc_fail);
58532 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58533 spin_unlock_bh(&rcu_torture_lock);
58534 return NULL;
58535 }
58536 - atomic_inc(&n_rcu_torture_alloc);
58537 + atomic_inc_unchecked(&n_rcu_torture_alloc);
58538 p = rcu_torture_freelist.next;
58539 list_del_init(p);
58540 spin_unlock_bh(&rcu_torture_lock);
58541 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58542 static void
58543 rcu_torture_free(struct rcu_torture *p)
58544 {
58545 - atomic_inc(&n_rcu_torture_free);
58546 + atomic_inc_unchecked(&n_rcu_torture_free);
58547 spin_lock_bh(&rcu_torture_lock);
58548 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58549 spin_unlock_bh(&rcu_torture_lock);
58550 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58551 i = rp->rtort_pipe_count;
58552 if (i > RCU_TORTURE_PIPE_LEN)
58553 i = RCU_TORTURE_PIPE_LEN;
58554 - atomic_inc(&rcu_torture_wcount[i]);
58555 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58556 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58557 rp->rtort_mbtest = 0;
58558 rcu_torture_free(rp);
58559 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58560 i = rp->rtort_pipe_count;
58561 if (i > RCU_TORTURE_PIPE_LEN)
58562 i = RCU_TORTURE_PIPE_LEN;
58563 - atomic_inc(&rcu_torture_wcount[i]);
58564 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58565 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58566 rp->rtort_mbtest = 0;
58567 list_del(&rp->rtort_free);
58568 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58569 i = old_rp->rtort_pipe_count;
58570 if (i > RCU_TORTURE_PIPE_LEN)
58571 i = RCU_TORTURE_PIPE_LEN;
58572 - atomic_inc(&rcu_torture_wcount[i]);
58573 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58574 old_rp->rtort_pipe_count++;
58575 cur_ops->deferred_free(old_rp);
58576 }
58577 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58578 return;
58579 }
58580 if (p->rtort_mbtest == 0)
58581 - atomic_inc(&n_rcu_torture_mberror);
58582 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58583 spin_lock(&rand_lock);
58584 cur_ops->read_delay(&rand);
58585 n_rcu_torture_timers++;
58586 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58587 continue;
58588 }
58589 if (p->rtort_mbtest == 0)
58590 - atomic_inc(&n_rcu_torture_mberror);
58591 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58592 cur_ops->read_delay(&rand);
58593 preempt_disable();
58594 pipe_count = p->rtort_pipe_count;
58595 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58596 rcu_torture_current,
58597 rcu_torture_current_version,
58598 list_empty(&rcu_torture_freelist),
58599 - atomic_read(&n_rcu_torture_alloc),
58600 - atomic_read(&n_rcu_torture_alloc_fail),
58601 - atomic_read(&n_rcu_torture_free),
58602 - atomic_read(&n_rcu_torture_mberror),
58603 + atomic_read_unchecked(&n_rcu_torture_alloc),
58604 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58605 + atomic_read_unchecked(&n_rcu_torture_free),
58606 + atomic_read_unchecked(&n_rcu_torture_mberror),
58607 n_rcu_torture_boost_ktrerror,
58608 n_rcu_torture_boost_rterror,
58609 n_rcu_torture_boost_failure,
58610 n_rcu_torture_boosts,
58611 n_rcu_torture_timers);
58612 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58613 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58614 n_rcu_torture_boost_ktrerror != 0 ||
58615 n_rcu_torture_boost_rterror != 0 ||
58616 n_rcu_torture_boost_failure != 0)
58617 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58618 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58619 if (i > 1) {
58620 cnt += sprintf(&page[cnt], "!!! ");
58621 - atomic_inc(&n_rcu_torture_error);
58622 + atomic_inc_unchecked(&n_rcu_torture_error);
58623 WARN_ON_ONCE(1);
58624 }
58625 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58626 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58627 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58628 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58629 cnt += sprintf(&page[cnt], " %d",
58630 - atomic_read(&rcu_torture_wcount[i]));
58631 + atomic_read_unchecked(&rcu_torture_wcount[i]));
58632 }
58633 cnt += sprintf(&page[cnt], "\n");
58634 if (cur_ops->stats)
58635 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58636
58637 if (cur_ops->cleanup)
58638 cur_ops->cleanup();
58639 - if (atomic_read(&n_rcu_torture_error))
58640 + if (atomic_read_unchecked(&n_rcu_torture_error))
58641 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58642 else
58643 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58644 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58645
58646 rcu_torture_current = NULL;
58647 rcu_torture_current_version = 0;
58648 - atomic_set(&n_rcu_torture_alloc, 0);
58649 - atomic_set(&n_rcu_torture_alloc_fail, 0);
58650 - atomic_set(&n_rcu_torture_free, 0);
58651 - atomic_set(&n_rcu_torture_mberror, 0);
58652 - atomic_set(&n_rcu_torture_error, 0);
58653 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58654 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58655 + atomic_set_unchecked(&n_rcu_torture_free, 0);
58656 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58657 + atomic_set_unchecked(&n_rcu_torture_error, 0);
58658 n_rcu_torture_boost_ktrerror = 0;
58659 n_rcu_torture_boost_rterror = 0;
58660 n_rcu_torture_boost_failure = 0;
58661 n_rcu_torture_boosts = 0;
58662 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58663 - atomic_set(&rcu_torture_wcount[i], 0);
58664 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58665 for_each_possible_cpu(cpu) {
58666 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58667 per_cpu(rcu_torture_count, cpu)[i] = 0;
58668 diff -urNp linux-3.0.3/kernel/rcutree.c linux-3.0.3/kernel/rcutree.c
58669 --- linux-3.0.3/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58670 +++ linux-3.0.3/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58671 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58672 /*
58673 * Do softirq processing for the current CPU.
58674 */
58675 -static void rcu_process_callbacks(struct softirq_action *unused)
58676 +static void rcu_process_callbacks(void)
58677 {
58678 __rcu_process_callbacks(&rcu_sched_state,
58679 &__get_cpu_var(rcu_sched_data));
58680 diff -urNp linux-3.0.3/kernel/rcutree_plugin.h linux-3.0.3/kernel/rcutree_plugin.h
58681 --- linux-3.0.3/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58682 +++ linux-3.0.3/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58683 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58684
58685 /* Clean up and exit. */
58686 smp_mb(); /* ensure expedited GP seen before counter increment. */
58687 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58688 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58689 unlock_mb_ret:
58690 mutex_unlock(&sync_rcu_preempt_exp_mutex);
58691 mb_ret:
58692 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
58693
58694 #else /* #ifndef CONFIG_SMP */
58695
58696 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
58697 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
58698 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
58699 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
58700
58701 static int synchronize_sched_expedited_cpu_stop(void *data)
58702 {
58703 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
58704 int firstsnap, s, snap, trycount = 0;
58705
58706 /* Note that atomic_inc_return() implies full memory barrier. */
58707 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
58708 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
58709 get_online_cpus();
58710
58711 /*
58712 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
58713 }
58714
58715 /* Check to see if someone else did our work for us. */
58716 - s = atomic_read(&sync_sched_expedited_done);
58717 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58718 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
58719 smp_mb(); /* ensure test happens before caller kfree */
58720 return;
58721 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
58722 * grace period works for us.
58723 */
58724 get_online_cpus();
58725 - snap = atomic_read(&sync_sched_expedited_started) - 1;
58726 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
58727 smp_mb(); /* ensure read is before try_stop_cpus(). */
58728 }
58729
58730 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
58731 * than we did beat us to the punch.
58732 */
58733 do {
58734 - s = atomic_read(&sync_sched_expedited_done);
58735 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58736 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
58737 smp_mb(); /* ensure test happens before caller kfree */
58738 break;
58739 }
58740 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
58741 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
58742
58743 put_online_cpus();
58744 }
58745 diff -urNp linux-3.0.3/kernel/relay.c linux-3.0.3/kernel/relay.c
58746 --- linux-3.0.3/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
58747 +++ linux-3.0.3/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
58748 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
58749 };
58750 ssize_t ret;
58751
58752 + pax_track_stack();
58753 +
58754 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
58755 return 0;
58756 if (splice_grow_spd(pipe, &spd))
58757 diff -urNp linux-3.0.3/kernel/resource.c linux-3.0.3/kernel/resource.c
58758 --- linux-3.0.3/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
58759 +++ linux-3.0.3/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
58760 @@ -141,8 +141,18 @@ static const struct file_operations proc
58761
58762 static int __init ioresources_init(void)
58763 {
58764 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
58765 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58766 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
58767 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
58768 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58769 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
58770 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
58771 +#endif
58772 +#else
58773 proc_create("ioports", 0, NULL, &proc_ioports_operations);
58774 proc_create("iomem", 0, NULL, &proc_iomem_operations);
58775 +#endif
58776 return 0;
58777 }
58778 __initcall(ioresources_init);
58779 diff -urNp linux-3.0.3/kernel/rtmutex-tester.c linux-3.0.3/kernel/rtmutex-tester.c
58780 --- linux-3.0.3/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
58781 +++ linux-3.0.3/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
58782 @@ -20,7 +20,7 @@
58783 #define MAX_RT_TEST_MUTEXES 8
58784
58785 static spinlock_t rttest_lock;
58786 -static atomic_t rttest_event;
58787 +static atomic_unchecked_t rttest_event;
58788
58789 struct test_thread_data {
58790 int opcode;
58791 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
58792
58793 case RTTEST_LOCKCONT:
58794 td->mutexes[td->opdata] = 1;
58795 - td->event = atomic_add_return(1, &rttest_event);
58796 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58797 return 0;
58798
58799 case RTTEST_RESET:
58800 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
58801 return 0;
58802
58803 case RTTEST_RESETEVENT:
58804 - atomic_set(&rttest_event, 0);
58805 + atomic_set_unchecked(&rttest_event, 0);
58806 return 0;
58807
58808 default:
58809 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
58810 return ret;
58811
58812 td->mutexes[id] = 1;
58813 - td->event = atomic_add_return(1, &rttest_event);
58814 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58815 rt_mutex_lock(&mutexes[id]);
58816 - td->event = atomic_add_return(1, &rttest_event);
58817 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58818 td->mutexes[id] = 4;
58819 return 0;
58820
58821 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
58822 return ret;
58823
58824 td->mutexes[id] = 1;
58825 - td->event = atomic_add_return(1, &rttest_event);
58826 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58827 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
58828 - td->event = atomic_add_return(1, &rttest_event);
58829 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58830 td->mutexes[id] = ret ? 0 : 4;
58831 return ret ? -EINTR : 0;
58832
58833 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
58834 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
58835 return ret;
58836
58837 - td->event = atomic_add_return(1, &rttest_event);
58838 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58839 rt_mutex_unlock(&mutexes[id]);
58840 - td->event = atomic_add_return(1, &rttest_event);
58841 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58842 td->mutexes[id] = 0;
58843 return 0;
58844
58845 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
58846 break;
58847
58848 td->mutexes[dat] = 2;
58849 - td->event = atomic_add_return(1, &rttest_event);
58850 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58851 break;
58852
58853 default:
58854 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
58855 return;
58856
58857 td->mutexes[dat] = 3;
58858 - td->event = atomic_add_return(1, &rttest_event);
58859 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58860 break;
58861
58862 case RTTEST_LOCKNOWAIT:
58863 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
58864 return;
58865
58866 td->mutexes[dat] = 1;
58867 - td->event = atomic_add_return(1, &rttest_event);
58868 + td->event = atomic_add_return_unchecked(1, &rttest_event);
58869 return;
58870
58871 default:
58872 diff -urNp linux-3.0.3/kernel/sched_autogroup.c linux-3.0.3/kernel/sched_autogroup.c
58873 --- linux-3.0.3/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
58874 +++ linux-3.0.3/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
58875 @@ -7,7 +7,7 @@
58876
58877 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
58878 static struct autogroup autogroup_default;
58879 -static atomic_t autogroup_seq_nr;
58880 +static atomic_unchecked_t autogroup_seq_nr;
58881
58882 static void __init autogroup_init(struct task_struct *init_task)
58883 {
58884 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
58885
58886 kref_init(&ag->kref);
58887 init_rwsem(&ag->lock);
58888 - ag->id = atomic_inc_return(&autogroup_seq_nr);
58889 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
58890 ag->tg = tg;
58891 #ifdef CONFIG_RT_GROUP_SCHED
58892 /*
58893 diff -urNp linux-3.0.3/kernel/sched.c linux-3.0.3/kernel/sched.c
58894 --- linux-3.0.3/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
58895 +++ linux-3.0.3/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
58896 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
58897 struct rq *rq;
58898 int cpu;
58899
58900 + pax_track_stack();
58901 +
58902 need_resched:
58903 preempt_disable();
58904 cpu = smp_processor_id();
58905 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
58906 /* convert nice value [19,-20] to rlimit style value [1,40] */
58907 int nice_rlim = 20 - nice;
58908
58909 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
58910 +
58911 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
58912 capable(CAP_SYS_NICE));
58913 }
58914 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
58915 if (nice > 19)
58916 nice = 19;
58917
58918 - if (increment < 0 && !can_nice(current, nice))
58919 + if (increment < 0 && (!can_nice(current, nice) ||
58920 + gr_handle_chroot_nice()))
58921 return -EPERM;
58922
58923 retval = security_task_setnice(current, nice);
58924 @@ -5111,6 +5116,7 @@ recheck:
58925 unsigned long rlim_rtprio =
58926 task_rlimit(p, RLIMIT_RTPRIO);
58927
58928 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
58929 /* can't set/change the rt policy */
58930 if (policy != p->policy && !rlim_rtprio)
58931 return -EPERM;
58932 diff -urNp linux-3.0.3/kernel/sched_fair.c linux-3.0.3/kernel/sched_fair.c
58933 --- linux-3.0.3/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
58934 +++ linux-3.0.3/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
58935 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
58936 * run_rebalance_domains is triggered when needed from the scheduler tick.
58937 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
58938 */
58939 -static void run_rebalance_domains(struct softirq_action *h)
58940 +static void run_rebalance_domains(void)
58941 {
58942 int this_cpu = smp_processor_id();
58943 struct rq *this_rq = cpu_rq(this_cpu);
58944 diff -urNp linux-3.0.3/kernel/signal.c linux-3.0.3/kernel/signal.c
58945 --- linux-3.0.3/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
58946 +++ linux-3.0.3/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
58947 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
58948
58949 int print_fatal_signals __read_mostly;
58950
58951 -static void __user *sig_handler(struct task_struct *t, int sig)
58952 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
58953 {
58954 return t->sighand->action[sig - 1].sa.sa_handler;
58955 }
58956
58957 -static int sig_handler_ignored(void __user *handler, int sig)
58958 +static int sig_handler_ignored(__sighandler_t handler, int sig)
58959 {
58960 /* Is it explicitly or implicitly ignored? */
58961 return handler == SIG_IGN ||
58962 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
58963 static int sig_task_ignored(struct task_struct *t, int sig,
58964 int from_ancestor_ns)
58965 {
58966 - void __user *handler;
58967 + __sighandler_t handler;
58968
58969 handler = sig_handler(t, sig);
58970
58971 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
58972 atomic_inc(&user->sigpending);
58973 rcu_read_unlock();
58974
58975 + if (!override_rlimit)
58976 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
58977 +
58978 if (override_rlimit ||
58979 atomic_read(&user->sigpending) <=
58980 task_rlimit(t, RLIMIT_SIGPENDING)) {
58981 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
58982
58983 int unhandled_signal(struct task_struct *tsk, int sig)
58984 {
58985 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
58986 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
58987 if (is_global_init(tsk))
58988 return 1;
58989 if (handler != SIG_IGN && handler != SIG_DFL)
58990 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
58991 }
58992 }
58993
58994 + /* allow glibc communication via tgkill to other threads in our
58995 + thread group */
58996 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
58997 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
58998 + && gr_handle_signal(t, sig))
58999 + return -EPERM;
59000 +
59001 return security_task_kill(t, info, sig, 0);
59002 }
59003
59004 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59005 return send_signal(sig, info, p, 1);
59006 }
59007
59008 -static int
59009 +int
59010 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59011 {
59012 return send_signal(sig, info, t, 0);
59013 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59014 unsigned long int flags;
59015 int ret, blocked, ignored;
59016 struct k_sigaction *action;
59017 + int is_unhandled = 0;
59018
59019 spin_lock_irqsave(&t->sighand->siglock, flags);
59020 action = &t->sighand->action[sig-1];
59021 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59022 }
59023 if (action->sa.sa_handler == SIG_DFL)
59024 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59025 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59026 + is_unhandled = 1;
59027 ret = specific_send_sig_info(sig, info, t);
59028 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59029
59030 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59031 + normal operation */
59032 + if (is_unhandled) {
59033 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59034 + gr_handle_crash(t, sig);
59035 + }
59036 +
59037 return ret;
59038 }
59039
59040 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59041 ret = check_kill_permission(sig, info, p);
59042 rcu_read_unlock();
59043
59044 - if (!ret && sig)
59045 + if (!ret && sig) {
59046 ret = do_send_sig_info(sig, info, p, true);
59047 + if (!ret)
59048 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59049 + }
59050
59051 return ret;
59052 }
59053 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59054 {
59055 siginfo_t info;
59056
59057 + pax_track_stack();
59058 +
59059 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59060
59061 memset(&info, 0, sizeof info);
59062 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59063 int error = -ESRCH;
59064
59065 rcu_read_lock();
59066 - p = find_task_by_vpid(pid);
59067 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59068 + /* allow glibc communication via tgkill to other threads in our
59069 + thread group */
59070 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59071 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59072 + p = find_task_by_vpid_unrestricted(pid);
59073 + else
59074 +#endif
59075 + p = find_task_by_vpid(pid);
59076 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59077 error = check_kill_permission(sig, info, p);
59078 /*
59079 diff -urNp linux-3.0.3/kernel/smp.c linux-3.0.3/kernel/smp.c
59080 --- linux-3.0.3/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59081 +++ linux-3.0.3/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59082 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59083 }
59084 EXPORT_SYMBOL(smp_call_function);
59085
59086 -void ipi_call_lock(void)
59087 +void ipi_call_lock(void) __acquires(call_function.lock)
59088 {
59089 raw_spin_lock(&call_function.lock);
59090 }
59091
59092 -void ipi_call_unlock(void)
59093 +void ipi_call_unlock(void) __releases(call_function.lock)
59094 {
59095 raw_spin_unlock(&call_function.lock);
59096 }
59097
59098 -void ipi_call_lock_irq(void)
59099 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59100 {
59101 raw_spin_lock_irq(&call_function.lock);
59102 }
59103
59104 -void ipi_call_unlock_irq(void)
59105 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59106 {
59107 raw_spin_unlock_irq(&call_function.lock);
59108 }
59109 diff -urNp linux-3.0.3/kernel/softirq.c linux-3.0.3/kernel/softirq.c
59110 --- linux-3.0.3/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59111 +++ linux-3.0.3/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59112 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59113
59114 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59115
59116 -char *softirq_to_name[NR_SOFTIRQS] = {
59117 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59118 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59119 "TASKLET", "SCHED", "HRTIMER", "RCU"
59120 };
59121 @@ -235,7 +235,7 @@ restart:
59122 kstat_incr_softirqs_this_cpu(vec_nr);
59123
59124 trace_softirq_entry(vec_nr);
59125 - h->action(h);
59126 + h->action();
59127 trace_softirq_exit(vec_nr);
59128 if (unlikely(prev_count != preempt_count())) {
59129 printk(KERN_ERR "huh, entered softirq %u %s %p"
59130 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59131 local_irq_restore(flags);
59132 }
59133
59134 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59135 +void open_softirq(int nr, void (*action)(void))
59136 {
59137 - softirq_vec[nr].action = action;
59138 + pax_open_kernel();
59139 + *(void **)&softirq_vec[nr].action = action;
59140 + pax_close_kernel();
59141 }
59142
59143 /*
59144 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59145
59146 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59147
59148 -static void tasklet_action(struct softirq_action *a)
59149 +static void tasklet_action(void)
59150 {
59151 struct tasklet_struct *list;
59152
59153 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59154 }
59155 }
59156
59157 -static void tasklet_hi_action(struct softirq_action *a)
59158 +static void tasklet_hi_action(void)
59159 {
59160 struct tasklet_struct *list;
59161
59162 diff -urNp linux-3.0.3/kernel/sys.c linux-3.0.3/kernel/sys.c
59163 --- linux-3.0.3/kernel/sys.c 2011-07-21 22:17:23.000000000 -0400
59164 +++ linux-3.0.3/kernel/sys.c 2011-08-23 21:48:14.000000000 -0400
59165 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59166 error = -EACCES;
59167 goto out;
59168 }
59169 +
59170 + if (gr_handle_chroot_setpriority(p, niceval)) {
59171 + error = -EACCES;
59172 + goto out;
59173 + }
59174 +
59175 no_nice = security_task_setnice(p, niceval);
59176 if (no_nice) {
59177 error = no_nice;
59178 @@ -537,6 +543,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59179 goto error;
59180 }
59181
59182 + if (gr_check_group_change(new->gid, new->egid, -1))
59183 + goto error;
59184 +
59185 if (rgid != (gid_t) -1 ||
59186 (egid != (gid_t) -1 && egid != old->gid))
59187 new->sgid = new->egid;
59188 @@ -566,6 +575,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59189 old = current_cred();
59190
59191 retval = -EPERM;
59192 +
59193 + if (gr_check_group_change(gid, gid, gid))
59194 + goto error;
59195 +
59196 if (nsown_capable(CAP_SETGID))
59197 new->gid = new->egid = new->sgid = new->fsgid = gid;
59198 else if (gid == old->gid || gid == old->sgid)
59199 @@ -646,6 +659,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59200 goto error;
59201 }
59202
59203 + if (gr_check_user_change(new->uid, new->euid, -1))
59204 + goto error;
59205 +
59206 if (new->uid != old->uid) {
59207 retval = set_user(new);
59208 if (retval < 0)
59209 @@ -690,6 +706,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59210 old = current_cred();
59211
59212 retval = -EPERM;
59213 +
59214 + if (gr_check_crash_uid(uid))
59215 + goto error;
59216 + if (gr_check_user_change(uid, uid, uid))
59217 + goto error;
59218 +
59219 if (nsown_capable(CAP_SETUID)) {
59220 new->suid = new->uid = uid;
59221 if (uid != old->uid) {
59222 @@ -744,6 +766,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59223 goto error;
59224 }
59225
59226 + if (gr_check_user_change(ruid, euid, -1))
59227 + goto error;
59228 +
59229 if (ruid != (uid_t) -1) {
59230 new->uid = ruid;
59231 if (ruid != old->uid) {
59232 @@ -808,6 +833,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59233 goto error;
59234 }
59235
59236 + if (gr_check_group_change(rgid, egid, -1))
59237 + goto error;
59238 +
59239 if (rgid != (gid_t) -1)
59240 new->gid = rgid;
59241 if (egid != (gid_t) -1)
59242 @@ -854,6 +882,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59243 old = current_cred();
59244 old_fsuid = old->fsuid;
59245
59246 + if (gr_check_user_change(-1, -1, uid))
59247 + goto error;
59248 +
59249 if (uid == old->uid || uid == old->euid ||
59250 uid == old->suid || uid == old->fsuid ||
59251 nsown_capable(CAP_SETUID)) {
59252 @@ -864,6 +895,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59253 }
59254 }
59255
59256 +error:
59257 abort_creds(new);
59258 return old_fsuid;
59259
59260 @@ -890,12 +922,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59261 if (gid == old->gid || gid == old->egid ||
59262 gid == old->sgid || gid == old->fsgid ||
59263 nsown_capable(CAP_SETGID)) {
59264 + if (gr_check_group_change(-1, -1, gid))
59265 + goto error;
59266 +
59267 if (gid != old_fsgid) {
59268 new->fsgid = gid;
59269 goto change_okay;
59270 }
59271 }
59272
59273 +error:
59274 abort_creds(new);
59275 return old_fsgid;
59276
59277 @@ -1642,7 +1678,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59278 error = get_dumpable(me->mm);
59279 break;
59280 case PR_SET_DUMPABLE:
59281 - if (arg2 < 0 || arg2 > 1) {
59282 + if (arg2 > 1) {
59283 error = -EINVAL;
59284 break;
59285 }
59286 diff -urNp linux-3.0.3/kernel/sysctl.c linux-3.0.3/kernel/sysctl.c
59287 --- linux-3.0.3/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59288 +++ linux-3.0.3/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59289 @@ -85,6 +85,13 @@
59290
59291
59292 #if defined(CONFIG_SYSCTL)
59293 +#include <linux/grsecurity.h>
59294 +#include <linux/grinternal.h>
59295 +
59296 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59297 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59298 + const int op);
59299 +extern int gr_handle_chroot_sysctl(const int op);
59300
59301 /* External variables not in a header file. */
59302 extern int sysctl_overcommit_memory;
59303 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59304 }
59305
59306 #endif
59307 +extern struct ctl_table grsecurity_table[];
59308
59309 static struct ctl_table root_table[];
59310 static struct ctl_table_root sysctl_table_root;
59311 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59312 int sysctl_legacy_va_layout;
59313 #endif
59314
59315 +#ifdef CONFIG_PAX_SOFTMODE
59316 +static ctl_table pax_table[] = {
59317 + {
59318 + .procname = "softmode",
59319 + .data = &pax_softmode,
59320 + .maxlen = sizeof(unsigned int),
59321 + .mode = 0600,
59322 + .proc_handler = &proc_dointvec,
59323 + },
59324 +
59325 + { }
59326 +};
59327 +#endif
59328 +
59329 /* The default sysctl tables: */
59330
59331 static struct ctl_table root_table[] = {
59332 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59333 #endif
59334
59335 static struct ctl_table kern_table[] = {
59336 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59337 + {
59338 + .procname = "grsecurity",
59339 + .mode = 0500,
59340 + .child = grsecurity_table,
59341 + },
59342 +#endif
59343 +
59344 +#ifdef CONFIG_PAX_SOFTMODE
59345 + {
59346 + .procname = "pax",
59347 + .mode = 0500,
59348 + .child = pax_table,
59349 + },
59350 +#endif
59351 +
59352 {
59353 .procname = "sched_child_runs_first",
59354 .data = &sysctl_sched_child_runs_first,
59355 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59356 .data = &modprobe_path,
59357 .maxlen = KMOD_PATH_LEN,
59358 .mode = 0644,
59359 - .proc_handler = proc_dostring,
59360 + .proc_handler = proc_dostring_modpriv,
59361 },
59362 {
59363 .procname = "modules_disabled",
59364 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59365 .extra1 = &zero,
59366 .extra2 = &one,
59367 },
59368 +#endif
59369 {
59370 .procname = "kptr_restrict",
59371 .data = &kptr_restrict,
59372 .maxlen = sizeof(int),
59373 .mode = 0644,
59374 .proc_handler = proc_dmesg_restrict,
59375 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59376 + .extra1 = &two,
59377 +#else
59378 .extra1 = &zero,
59379 +#endif
59380 .extra2 = &two,
59381 },
59382 -#endif
59383 {
59384 .procname = "ngroups_max",
59385 .data = &ngroups_max,
59386 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59387 .proc_handler = proc_dointvec_minmax,
59388 .extra1 = &zero,
59389 },
59390 + {
59391 + .procname = "heap_stack_gap",
59392 + .data = &sysctl_heap_stack_gap,
59393 + .maxlen = sizeof(sysctl_heap_stack_gap),
59394 + .mode = 0644,
59395 + .proc_handler = proc_doulongvec_minmax,
59396 + },
59397 #else
59398 {
59399 .procname = "nr_trim_pages",
59400 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59401 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59402 {
59403 int mode;
59404 + int error;
59405 +
59406 + if (table->parent != NULL && table->parent->procname != NULL &&
59407 + table->procname != NULL &&
59408 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59409 + return -EACCES;
59410 + if (gr_handle_chroot_sysctl(op))
59411 + return -EACCES;
59412 + error = gr_handle_sysctl(table, op);
59413 + if (error)
59414 + return error;
59415
59416 if (root->permissions)
59417 mode = root->permissions(root, current->nsproxy, table);
59418 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59419 buffer, lenp, ppos);
59420 }
59421
59422 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59423 + void __user *buffer, size_t *lenp, loff_t *ppos)
59424 +{
59425 + if (write && !capable(CAP_SYS_MODULE))
59426 + return -EPERM;
59427 +
59428 + return _proc_do_string(table->data, table->maxlen, write,
59429 + buffer, lenp, ppos);
59430 +}
59431 +
59432 static size_t proc_skip_spaces(char **buf)
59433 {
59434 size_t ret;
59435 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59436 len = strlen(tmp);
59437 if (len > *size)
59438 len = *size;
59439 + if (len > sizeof(tmp))
59440 + len = sizeof(tmp);
59441 if (copy_to_user(*buf, tmp, len))
59442 return -EFAULT;
59443 *size -= len;
59444 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59445 *i = val;
59446 } else {
59447 val = convdiv * (*i) / convmul;
59448 - if (!first)
59449 + if (!first) {
59450 err = proc_put_char(&buffer, &left, '\t');
59451 + if (err)
59452 + break;
59453 + }
59454 err = proc_put_long(&buffer, &left, val, false);
59455 if (err)
59456 break;
59457 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59458 return -ENOSYS;
59459 }
59460
59461 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59462 + void __user *buffer, size_t *lenp, loff_t *ppos)
59463 +{
59464 + return -ENOSYS;
59465 +}
59466 +
59467 int proc_dointvec(struct ctl_table *table, int write,
59468 void __user *buffer, size_t *lenp, loff_t *ppos)
59469 {
59470 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59471 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59472 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59473 EXPORT_SYMBOL(proc_dostring);
59474 +EXPORT_SYMBOL(proc_dostring_modpriv);
59475 EXPORT_SYMBOL(proc_doulongvec_minmax);
59476 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59477 EXPORT_SYMBOL(register_sysctl_table);
59478 diff -urNp linux-3.0.3/kernel/sysctl_check.c linux-3.0.3/kernel/sysctl_check.c
59479 --- linux-3.0.3/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59480 +++ linux-3.0.3/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59481 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59482 set_fail(&fail, table, "Directory with extra2");
59483 } else {
59484 if ((table->proc_handler == proc_dostring) ||
59485 + (table->proc_handler == proc_dostring_modpriv) ||
59486 (table->proc_handler == proc_dointvec) ||
59487 (table->proc_handler == proc_dointvec_minmax) ||
59488 (table->proc_handler == proc_dointvec_jiffies) ||
59489 diff -urNp linux-3.0.3/kernel/taskstats.c linux-3.0.3/kernel/taskstats.c
59490 --- linux-3.0.3/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59491 +++ linux-3.0.3/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59492 @@ -27,9 +27,12 @@
59493 #include <linux/cgroup.h>
59494 #include <linux/fs.h>
59495 #include <linux/file.h>
59496 +#include <linux/grsecurity.h>
59497 #include <net/genetlink.h>
59498 #include <asm/atomic.h>
59499
59500 +extern int gr_is_taskstats_denied(int pid);
59501 +
59502 /*
59503 * Maximum length of a cpumask that can be specified in
59504 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59505 @@ -558,6 +561,9 @@ err:
59506
59507 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59508 {
59509 + if (gr_is_taskstats_denied(current->pid))
59510 + return -EACCES;
59511 +
59512 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59513 return cmd_attr_register_cpumask(info);
59514 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59515 diff -urNp linux-3.0.3/kernel/time/alarmtimer.c linux-3.0.3/kernel/time/alarmtimer.c
59516 --- linux-3.0.3/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59517 +++ linux-3.0.3/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59518 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59519 {
59520 int error = 0;
59521 int i;
59522 - struct k_clock alarm_clock = {
59523 + static struct k_clock alarm_clock = {
59524 .clock_getres = alarm_clock_getres,
59525 .clock_get = alarm_clock_get,
59526 .timer_create = alarm_timer_create,
59527 diff -urNp linux-3.0.3/kernel/time/tick-broadcast.c linux-3.0.3/kernel/time/tick-broadcast.c
59528 --- linux-3.0.3/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59529 +++ linux-3.0.3/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59530 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59531 * then clear the broadcast bit.
59532 */
59533 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59534 - int cpu = smp_processor_id();
59535 + cpu = smp_processor_id();
59536
59537 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59538 tick_broadcast_clear_oneshot(cpu);
59539 diff -urNp linux-3.0.3/kernel/time/timekeeping.c linux-3.0.3/kernel/time/timekeeping.c
59540 --- linux-3.0.3/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59541 +++ linux-3.0.3/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59542 @@ -14,6 +14,7 @@
59543 #include <linux/init.h>
59544 #include <linux/mm.h>
59545 #include <linux/sched.h>
59546 +#include <linux/grsecurity.h>
59547 #include <linux/syscore_ops.h>
59548 #include <linux/clocksource.h>
59549 #include <linux/jiffies.h>
59550 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59551 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59552 return -EINVAL;
59553
59554 + gr_log_timechange();
59555 +
59556 write_seqlock_irqsave(&xtime_lock, flags);
59557
59558 timekeeping_forward_now();
59559 diff -urNp linux-3.0.3/kernel/time/timer_list.c linux-3.0.3/kernel/time/timer_list.c
59560 --- linux-3.0.3/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59561 +++ linux-3.0.3/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59562 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59563
59564 static void print_name_offset(struct seq_file *m, void *sym)
59565 {
59566 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59567 + SEQ_printf(m, "<%p>", NULL);
59568 +#else
59569 char symname[KSYM_NAME_LEN];
59570
59571 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59572 SEQ_printf(m, "<%pK>", sym);
59573 else
59574 SEQ_printf(m, "%s", symname);
59575 +#endif
59576 }
59577
59578 static void
59579 @@ -112,7 +116,11 @@ next_one:
59580 static void
59581 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59582 {
59583 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59584 + SEQ_printf(m, " .base: %p\n", NULL);
59585 +#else
59586 SEQ_printf(m, " .base: %pK\n", base);
59587 +#endif
59588 SEQ_printf(m, " .index: %d\n",
59589 base->index);
59590 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59591 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59592 {
59593 struct proc_dir_entry *pe;
59594
59595 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59596 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59597 +#else
59598 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59599 +#endif
59600 if (!pe)
59601 return -ENOMEM;
59602 return 0;
59603 diff -urNp linux-3.0.3/kernel/time/timer_stats.c linux-3.0.3/kernel/time/timer_stats.c
59604 --- linux-3.0.3/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59605 +++ linux-3.0.3/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59606 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59607 static unsigned long nr_entries;
59608 static struct entry entries[MAX_ENTRIES];
59609
59610 -static atomic_t overflow_count;
59611 +static atomic_unchecked_t overflow_count;
59612
59613 /*
59614 * The entries are in a hash-table, for fast lookup:
59615 @@ -140,7 +140,7 @@ static void reset_entries(void)
59616 nr_entries = 0;
59617 memset(entries, 0, sizeof(entries));
59618 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59619 - atomic_set(&overflow_count, 0);
59620 + atomic_set_unchecked(&overflow_count, 0);
59621 }
59622
59623 static struct entry *alloc_entry(void)
59624 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59625 if (likely(entry))
59626 entry->count++;
59627 else
59628 - atomic_inc(&overflow_count);
59629 + atomic_inc_unchecked(&overflow_count);
59630
59631 out_unlock:
59632 raw_spin_unlock_irqrestore(lock, flags);
59633 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59634
59635 static void print_name_offset(struct seq_file *m, unsigned long addr)
59636 {
59637 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59638 + seq_printf(m, "<%p>", NULL);
59639 +#else
59640 char symname[KSYM_NAME_LEN];
59641
59642 if (lookup_symbol_name(addr, symname) < 0)
59643 seq_printf(m, "<%p>", (void *)addr);
59644 else
59645 seq_printf(m, "%s", symname);
59646 +#endif
59647 }
59648
59649 static int tstats_show(struct seq_file *m, void *v)
59650 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59651
59652 seq_puts(m, "Timer Stats Version: v0.2\n");
59653 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59654 - if (atomic_read(&overflow_count))
59655 + if (atomic_read_unchecked(&overflow_count))
59656 seq_printf(m, "Overflow: %d entries\n",
59657 - atomic_read(&overflow_count));
59658 + atomic_read_unchecked(&overflow_count));
59659
59660 for (i = 0; i < nr_entries; i++) {
59661 entry = entries + i;
59662 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59663 {
59664 struct proc_dir_entry *pe;
59665
59666 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59667 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
59668 +#else
59669 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
59670 +#endif
59671 if (!pe)
59672 return -ENOMEM;
59673 return 0;
59674 diff -urNp linux-3.0.3/kernel/time.c linux-3.0.3/kernel/time.c
59675 --- linux-3.0.3/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
59676 +++ linux-3.0.3/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
59677 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
59678 return error;
59679
59680 if (tz) {
59681 + /* we log in do_settimeofday called below, so don't log twice
59682 + */
59683 + if (!tv)
59684 + gr_log_timechange();
59685 +
59686 /* SMP safe, global irq locking makes it work. */
59687 sys_tz = *tz;
59688 update_vsyscall_tz();
59689 diff -urNp linux-3.0.3/kernel/timer.c linux-3.0.3/kernel/timer.c
59690 --- linux-3.0.3/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
59691 +++ linux-3.0.3/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
59692 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
59693 /*
59694 * This function runs timers and the timer-tq in bottom half context.
59695 */
59696 -static void run_timer_softirq(struct softirq_action *h)
59697 +static void run_timer_softirq(void)
59698 {
59699 struct tvec_base *base = __this_cpu_read(tvec_bases);
59700
59701 diff -urNp linux-3.0.3/kernel/trace/blktrace.c linux-3.0.3/kernel/trace/blktrace.c
59702 --- linux-3.0.3/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
59703 +++ linux-3.0.3/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
59704 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
59705 struct blk_trace *bt = filp->private_data;
59706 char buf[16];
59707
59708 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
59709 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
59710
59711 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
59712 }
59713 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
59714 return 1;
59715
59716 bt = buf->chan->private_data;
59717 - atomic_inc(&bt->dropped);
59718 + atomic_inc_unchecked(&bt->dropped);
59719 return 0;
59720 }
59721
59722 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
59723
59724 bt->dir = dir;
59725 bt->dev = dev;
59726 - atomic_set(&bt->dropped, 0);
59727 + atomic_set_unchecked(&bt->dropped, 0);
59728
59729 ret = -EIO;
59730 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
59731 diff -urNp linux-3.0.3/kernel/trace/ftrace.c linux-3.0.3/kernel/trace/ftrace.c
59732 --- linux-3.0.3/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
59733 +++ linux-3.0.3/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
59734 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
59735 if (unlikely(ftrace_disabled))
59736 return 0;
59737
59738 + ret = ftrace_arch_code_modify_prepare();
59739 + FTRACE_WARN_ON(ret);
59740 + if (ret)
59741 + return 0;
59742 +
59743 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
59744 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
59745 if (ret) {
59746 ftrace_bug(ret, ip);
59747 - return 0;
59748 }
59749 - return 1;
59750 + return ret ? 0 : 1;
59751 }
59752
59753 /*
59754 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
59755
59756 int
59757 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59758 - void *data)
59759 + void *data)
59760 {
59761 struct ftrace_func_probe *entry;
59762 struct ftrace_page *pg;
59763 diff -urNp linux-3.0.3/kernel/trace/trace.c linux-3.0.3/kernel/trace/trace.c
59764 --- linux-3.0.3/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
59765 +++ linux-3.0.3/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
59766 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
59767 size_t rem;
59768 unsigned int i;
59769
59770 + pax_track_stack();
59771 +
59772 if (splice_grow_spd(pipe, &spd))
59773 return -ENOMEM;
59774
59775 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
59776 int entries, size, i;
59777 size_t ret;
59778
59779 + pax_track_stack();
59780 +
59781 if (splice_grow_spd(pipe, &spd))
59782 return -ENOMEM;
59783
59784 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
59785 };
59786 #endif
59787
59788 -static struct dentry *d_tracer;
59789 -
59790 struct dentry *tracing_init_dentry(void)
59791 {
59792 + static struct dentry *d_tracer;
59793 static int once;
59794
59795 if (d_tracer)
59796 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
59797 return d_tracer;
59798 }
59799
59800 -static struct dentry *d_percpu;
59801 -
59802 struct dentry *tracing_dentry_percpu(void)
59803 {
59804 + static struct dentry *d_percpu;
59805 static int once;
59806 struct dentry *d_tracer;
59807
59808 diff -urNp linux-3.0.3/kernel/trace/trace_events.c linux-3.0.3/kernel/trace/trace_events.c
59809 --- linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
59810 +++ linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
59811 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
59812 struct ftrace_module_file_ops {
59813 struct list_head list;
59814 struct module *mod;
59815 - struct file_operations id;
59816 - struct file_operations enable;
59817 - struct file_operations format;
59818 - struct file_operations filter;
59819 };
59820
59821 static struct ftrace_module_file_ops *
59822 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
59823
59824 file_ops->mod = mod;
59825
59826 - file_ops->id = ftrace_event_id_fops;
59827 - file_ops->id.owner = mod;
59828 -
59829 - file_ops->enable = ftrace_enable_fops;
59830 - file_ops->enable.owner = mod;
59831 -
59832 - file_ops->filter = ftrace_event_filter_fops;
59833 - file_ops->filter.owner = mod;
59834 -
59835 - file_ops->format = ftrace_event_format_fops;
59836 - file_ops->format.owner = mod;
59837 + pax_open_kernel();
59838 + *(void **)&mod->trace_id.owner = mod;
59839 + *(void **)&mod->trace_enable.owner = mod;
59840 + *(void **)&mod->trace_filter.owner = mod;
59841 + *(void **)&mod->trace_format.owner = mod;
59842 + pax_close_kernel();
59843
59844 list_add(&file_ops->list, &ftrace_module_file_list);
59845
59846 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
59847
59848 for_each_event(call, start, end) {
59849 __trace_add_event_call(*call, mod,
59850 - &file_ops->id, &file_ops->enable,
59851 - &file_ops->filter, &file_ops->format);
59852 + &mod->trace_id, &mod->trace_enable,
59853 + &mod->trace_filter, &mod->trace_format);
59854 }
59855 }
59856
59857 diff -urNp linux-3.0.3/kernel/trace/trace_mmiotrace.c linux-3.0.3/kernel/trace/trace_mmiotrace.c
59858 --- linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
59859 +++ linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
59860 @@ -24,7 +24,7 @@ struct header_iter {
59861 static struct trace_array *mmio_trace_array;
59862 static bool overrun_detected;
59863 static unsigned long prev_overruns;
59864 -static atomic_t dropped_count;
59865 +static atomic_unchecked_t dropped_count;
59866
59867 static void mmio_reset_data(struct trace_array *tr)
59868 {
59869 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
59870
59871 static unsigned long count_overruns(struct trace_iterator *iter)
59872 {
59873 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
59874 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
59875 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
59876
59877 if (over > prev_overruns)
59878 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
59879 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
59880 sizeof(*entry), 0, pc);
59881 if (!event) {
59882 - atomic_inc(&dropped_count);
59883 + atomic_inc_unchecked(&dropped_count);
59884 return;
59885 }
59886 entry = ring_buffer_event_data(event);
59887 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
59888 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
59889 sizeof(*entry), 0, pc);
59890 if (!event) {
59891 - atomic_inc(&dropped_count);
59892 + atomic_inc_unchecked(&dropped_count);
59893 return;
59894 }
59895 entry = ring_buffer_event_data(event);
59896 diff -urNp linux-3.0.3/kernel/trace/trace_output.c linux-3.0.3/kernel/trace/trace_output.c
59897 --- linux-3.0.3/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
59898 +++ linux-3.0.3/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
59899 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
59900
59901 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
59902 if (!IS_ERR(p)) {
59903 - p = mangle_path(s->buffer + s->len, p, "\n");
59904 + p = mangle_path(s->buffer + s->len, p, "\n\\");
59905 if (p) {
59906 s->len = p - s->buffer;
59907 return 1;
59908 diff -urNp linux-3.0.3/kernel/trace/trace_stack.c linux-3.0.3/kernel/trace/trace_stack.c
59909 --- linux-3.0.3/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
59910 +++ linux-3.0.3/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
59911 @@ -50,7 +50,7 @@ static inline void check_stack(void)
59912 return;
59913
59914 /* we do not handle interrupt stacks yet */
59915 - if (!object_is_on_stack(&this_size))
59916 + if (!object_starts_on_stack(&this_size))
59917 return;
59918
59919 local_irq_save(flags);
59920 diff -urNp linux-3.0.3/kernel/trace/trace_workqueue.c linux-3.0.3/kernel/trace/trace_workqueue.c
59921 --- linux-3.0.3/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
59922 +++ linux-3.0.3/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
59923 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
59924 int cpu;
59925 pid_t pid;
59926 /* Can be inserted from interrupt or user context, need to be atomic */
59927 - atomic_t inserted;
59928 + atomic_unchecked_t inserted;
59929 /*
59930 * Don't need to be atomic, works are serialized in a single workqueue thread
59931 * on a single CPU.
59932 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
59933 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
59934 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
59935 if (node->pid == wq_thread->pid) {
59936 - atomic_inc(&node->inserted);
59937 + atomic_inc_unchecked(&node->inserted);
59938 goto found;
59939 }
59940 }
59941 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
59942 tsk = get_pid_task(pid, PIDTYPE_PID);
59943 if (tsk) {
59944 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
59945 - atomic_read(&cws->inserted), cws->executed,
59946 + atomic_read_unchecked(&cws->inserted), cws->executed,
59947 tsk->comm);
59948 put_task_struct(tsk);
59949 }
59950 diff -urNp linux-3.0.3/lib/bug.c linux-3.0.3/lib/bug.c
59951 --- linux-3.0.3/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
59952 +++ linux-3.0.3/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
59953 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
59954 return BUG_TRAP_TYPE_NONE;
59955
59956 bug = find_bug(bugaddr);
59957 + if (!bug)
59958 + return BUG_TRAP_TYPE_NONE;
59959
59960 file = NULL;
59961 line = 0;
59962 diff -urNp linux-3.0.3/lib/debugobjects.c linux-3.0.3/lib/debugobjects.c
59963 --- linux-3.0.3/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
59964 +++ linux-3.0.3/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
59965 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
59966 if (limit > 4)
59967 return;
59968
59969 - is_on_stack = object_is_on_stack(addr);
59970 + is_on_stack = object_starts_on_stack(addr);
59971 if (is_on_stack == onstack)
59972 return;
59973
59974 diff -urNp linux-3.0.3/lib/dma-debug.c linux-3.0.3/lib/dma-debug.c
59975 --- linux-3.0.3/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
59976 +++ linux-3.0.3/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
59977 @@ -870,7 +870,7 @@ out:
59978
59979 static void check_for_stack(struct device *dev, void *addr)
59980 {
59981 - if (object_is_on_stack(addr))
59982 + if (object_starts_on_stack(addr))
59983 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
59984 "stack [addr=%p]\n", addr);
59985 }
59986 diff -urNp linux-3.0.3/lib/extable.c linux-3.0.3/lib/extable.c
59987 --- linux-3.0.3/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
59988 +++ linux-3.0.3/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
59989 @@ -13,6 +13,7 @@
59990 #include <linux/init.h>
59991 #include <linux/sort.h>
59992 #include <asm/uaccess.h>
59993 +#include <asm/pgtable.h>
59994
59995 #ifndef ARCH_HAS_SORT_EXTABLE
59996 /*
59997 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
59998 void sort_extable(struct exception_table_entry *start,
59999 struct exception_table_entry *finish)
60000 {
60001 + pax_open_kernel();
60002 sort(start, finish - start, sizeof(struct exception_table_entry),
60003 cmp_ex, NULL);
60004 + pax_close_kernel();
60005 }
60006
60007 #ifdef CONFIG_MODULES
60008 diff -urNp linux-3.0.3/lib/inflate.c linux-3.0.3/lib/inflate.c
60009 --- linux-3.0.3/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60010 +++ linux-3.0.3/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60011 @@ -269,7 +269,7 @@ static void free(void *where)
60012 malloc_ptr = free_mem_ptr;
60013 }
60014 #else
60015 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60016 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60017 #define free(a) kfree(a)
60018 #endif
60019
60020 diff -urNp linux-3.0.3/lib/Kconfig.debug linux-3.0.3/lib/Kconfig.debug
60021 --- linux-3.0.3/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60022 +++ linux-3.0.3/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60023 @@ -1088,6 +1088,7 @@ config LATENCYTOP
60024 depends on DEBUG_KERNEL
60025 depends on STACKTRACE_SUPPORT
60026 depends on PROC_FS
60027 + depends on !GRKERNSEC_HIDESYM
60028 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60029 select KALLSYMS
60030 select KALLSYMS_ALL
60031 diff -urNp linux-3.0.3/lib/kref.c linux-3.0.3/lib/kref.c
60032 --- linux-3.0.3/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60033 +++ linux-3.0.3/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60034 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60035 */
60036 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60037 {
60038 - WARN_ON(release == NULL);
60039 + BUG_ON(release == NULL);
60040 WARN_ON(release == (void (*)(struct kref *))kfree);
60041
60042 if (atomic_dec_and_test(&kref->refcount)) {
60043 diff -urNp linux-3.0.3/lib/radix-tree.c linux-3.0.3/lib/radix-tree.c
60044 --- linux-3.0.3/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60045 +++ linux-3.0.3/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60046 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60047 int nr;
60048 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60049 };
60050 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60051 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60052
60053 static inline void *ptr_to_indirect(void *ptr)
60054 {
60055 diff -urNp linux-3.0.3/lib/vsprintf.c linux-3.0.3/lib/vsprintf.c
60056 --- linux-3.0.3/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60057 +++ linux-3.0.3/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60058 @@ -16,6 +16,9 @@
60059 * - scnprintf and vscnprintf
60060 */
60061
60062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60063 +#define __INCLUDED_BY_HIDESYM 1
60064 +#endif
60065 #include <stdarg.h>
60066 #include <linux/module.h>
60067 #include <linux/types.h>
60068 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60069 char sym[KSYM_SYMBOL_LEN];
60070 if (ext == 'B')
60071 sprint_backtrace(sym, value);
60072 - else if (ext != 'f' && ext != 's')
60073 + else if (ext != 'f' && ext != 's' && ext != 'a')
60074 sprint_symbol(sym, value);
60075 else
60076 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60077 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60078 return string(buf, end, uuid, spec);
60079 }
60080
60081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60082 +int kptr_restrict __read_mostly = 2;
60083 +#else
60084 int kptr_restrict __read_mostly;
60085 +#endif
60086
60087 /*
60088 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60089 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60090 * - 'S' For symbolic direct pointers with offset
60091 * - 's' For symbolic direct pointers without offset
60092 * - 'B' For backtraced symbolic direct pointers with offset
60093 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60094 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60095 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60096 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60097 * - 'M' For a 6-byte MAC address, it prints the address in the
60098 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60099 {
60100 if (!ptr && *fmt != 'K') {
60101 /*
60102 - * Print (null) with the same width as a pointer so it makes
60103 + * Print (nil) with the same width as a pointer so it makes
60104 * tabular output look nice.
60105 */
60106 if (spec.field_width == -1)
60107 spec.field_width = 2 * sizeof(void *);
60108 - return string(buf, end, "(null)", spec);
60109 + return string(buf, end, "(nil)", spec);
60110 }
60111
60112 switch (*fmt) {
60113 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60114 /* Fallthrough */
60115 case 'S':
60116 case 's':
60117 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60118 + break;
60119 +#else
60120 + return symbol_string(buf, end, ptr, spec, *fmt);
60121 +#endif
60122 + case 'A':
60123 + case 'a':
60124 case 'B':
60125 return symbol_string(buf, end, ptr, spec, *fmt);
60126 case 'R':
60127 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60128 typeof(type) value; \
60129 if (sizeof(type) == 8) { \
60130 args = PTR_ALIGN(args, sizeof(u32)); \
60131 - *(u32 *)&value = *(u32 *)args; \
60132 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60133 + *(u32 *)&value = *(const u32 *)args; \
60134 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60135 } else { \
60136 args = PTR_ALIGN(args, sizeof(type)); \
60137 - value = *(typeof(type) *)args; \
60138 + value = *(const typeof(type) *)args; \
60139 } \
60140 args += sizeof(type); \
60141 value; \
60142 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60143 case FORMAT_TYPE_STR: {
60144 const char *str_arg = args;
60145 args += strlen(str_arg) + 1;
60146 - str = string(str, end, (char *)str_arg, spec);
60147 + str = string(str, end, str_arg, spec);
60148 break;
60149 }
60150
60151 diff -urNp linux-3.0.3/localversion-grsec linux-3.0.3/localversion-grsec
60152 --- linux-3.0.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60153 +++ linux-3.0.3/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60154 @@ -0,0 +1 @@
60155 +-grsec
60156 diff -urNp linux-3.0.3/Makefile linux-3.0.3/Makefile
60157 --- linux-3.0.3/Makefile 2011-08-23 21:44:40.000000000 -0400
60158 +++ linux-3.0.3/Makefile 2011-08-23 22:01:23.000000000 -0400
60159 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60160
60161 HOSTCC = gcc
60162 HOSTCXX = g++
60163 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60164 -HOSTCXXFLAGS = -O2
60165 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60166 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60167 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60168
60169 # Decide whether to build built-in, modular, or both.
60170 # Normally, just do built-in.
60171 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60172 KBUILD_CPPFLAGS := -D__KERNEL__
60173
60174 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60175 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60176 -fno-strict-aliasing -fno-common \
60177 -Werror-implicit-function-declaration \
60178 -Wno-format-security \
60179 -fno-delete-null-pointer-checks
60180 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60181 KBUILD_AFLAGS_KERNEL :=
60182 KBUILD_CFLAGS_KERNEL :=
60183 KBUILD_AFLAGS := -D__ASSEMBLY__
60184 @@ -407,10 +410,11 @@ export RCS_TAR_IGNORE := --exclude SCCS
60185 # Rules shared between *config targets and build targets
60186
60187 # Basic helpers built in scripts/
60188 -PHONY += scripts_basic
60189 -scripts_basic:
60190 +PHONY += scripts_basic0 scripts_basic gcc-plugins
60191 +scripts_basic0:
60192 $(Q)$(MAKE) $(build)=scripts/basic
60193 $(Q)rm -f .tmp_quiet_recordmcount
60194 +scripts_basic: scripts_basic0 gcc-plugins
60195
60196 # To avoid any implicit rule to kick in, define an empty command.
60197 scripts/basic/%: scripts_basic ;
60198 @@ -564,6 +568,25 @@ else
60199 KBUILD_CFLAGS += -O2
60200 endif
60201
60202 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60203 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60204 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60205 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60206 +endif
60207 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60208 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60209 +gcc-plugins:
60210 + $(Q)$(MAKE) $(build)=tools/gcc
60211 +else
60212 +gcc-plugins:
60213 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60214 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60215 +else
60216 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60217 +endif
60218 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60219 +endif
60220 +
60221 include $(srctree)/arch/$(SRCARCH)/Makefile
60222
60223 ifneq ($(CONFIG_FRAME_WARN),0)
60224 @@ -708,7 +731,7 @@ export mod_strip_cmd
60225
60226
60227 ifeq ($(KBUILD_EXTMOD),)
60228 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60229 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60230
60231 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60232 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60233 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60234 endif
60235
60236 # prepare2 creates a makefile if using a separate output directory
60237 -prepare2: prepare3 outputmakefile asm-generic
60238 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60239
60240 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60241 include/config/auto.conf
60242 @@ -1198,7 +1221,7 @@ distclean: mrproper
60243 @find $(srctree) $(RCS_FIND_IGNORE) \
60244 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
60245 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
60246 - -o -name '.*.rej' -o -size 0 \
60247 + -o -name '.*.rej' -o -size 0 -o -name '*.so' \
60248 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
60249 -type f -print | xargs rm -f
60250
60251 @@ -1404,7 +1427,7 @@ clean: $(clean-dirs)
60252 $(call cmd,rmdirs)
60253 $(call cmd,rmfiles)
60254 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60255 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60256 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60257 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60258 -o -name '*.symtypes' -o -name 'modules.order' \
60259 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60260 diff -urNp linux-3.0.3/mm/filemap.c linux-3.0.3/mm/filemap.c
60261 --- linux-3.0.3/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60262 +++ linux-3.0.3/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60263 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60264 struct address_space *mapping = file->f_mapping;
60265
60266 if (!mapping->a_ops->readpage)
60267 - return -ENOEXEC;
60268 + return -ENODEV;
60269 file_accessed(file);
60270 vma->vm_ops = &generic_file_vm_ops;
60271 vma->vm_flags |= VM_CAN_NONLINEAR;
60272 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60273 *pos = i_size_read(inode);
60274
60275 if (limit != RLIM_INFINITY) {
60276 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60277 if (*pos >= limit) {
60278 send_sig(SIGXFSZ, current, 0);
60279 return -EFBIG;
60280 diff -urNp linux-3.0.3/mm/fremap.c linux-3.0.3/mm/fremap.c
60281 --- linux-3.0.3/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60282 +++ linux-3.0.3/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60283 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60284 retry:
60285 vma = find_vma(mm, start);
60286
60287 +#ifdef CONFIG_PAX_SEGMEXEC
60288 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60289 + goto out;
60290 +#endif
60291 +
60292 /*
60293 * Make sure the vma is shared, that it supports prefaulting,
60294 * and that the remapped range is valid and fully within
60295 diff -urNp linux-3.0.3/mm/highmem.c linux-3.0.3/mm/highmem.c
60296 --- linux-3.0.3/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60297 +++ linux-3.0.3/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60298 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60299 * So no dangers, even with speculative execution.
60300 */
60301 page = pte_page(pkmap_page_table[i]);
60302 + pax_open_kernel();
60303 pte_clear(&init_mm, (unsigned long)page_address(page),
60304 &pkmap_page_table[i]);
60305 -
60306 + pax_close_kernel();
60307 set_page_address(page, NULL);
60308 need_flush = 1;
60309 }
60310 @@ -186,9 +187,11 @@ start:
60311 }
60312 }
60313 vaddr = PKMAP_ADDR(last_pkmap_nr);
60314 +
60315 + pax_open_kernel();
60316 set_pte_at(&init_mm, vaddr,
60317 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60318 -
60319 + pax_close_kernel();
60320 pkmap_count[last_pkmap_nr] = 1;
60321 set_page_address(page, (void *)vaddr);
60322
60323 diff -urNp linux-3.0.3/mm/huge_memory.c linux-3.0.3/mm/huge_memory.c
60324 --- linux-3.0.3/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60325 +++ linux-3.0.3/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60326 @@ -702,7 +702,7 @@ out:
60327 * run pte_offset_map on the pmd, if an huge pmd could
60328 * materialize from under us from a different thread.
60329 */
60330 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60331 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60332 return VM_FAULT_OOM;
60333 /* if an huge pmd materialized from under us just retry later */
60334 if (unlikely(pmd_trans_huge(*pmd)))
60335 diff -urNp linux-3.0.3/mm/hugetlb.c linux-3.0.3/mm/hugetlb.c
60336 --- linux-3.0.3/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60337 +++ linux-3.0.3/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60338 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60339 return 1;
60340 }
60341
60342 +#ifdef CONFIG_PAX_SEGMEXEC
60343 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60344 +{
60345 + struct mm_struct *mm = vma->vm_mm;
60346 + struct vm_area_struct *vma_m;
60347 + unsigned long address_m;
60348 + pte_t *ptep_m;
60349 +
60350 + vma_m = pax_find_mirror_vma(vma);
60351 + if (!vma_m)
60352 + return;
60353 +
60354 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60355 + address_m = address + SEGMEXEC_TASK_SIZE;
60356 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60357 + get_page(page_m);
60358 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60359 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60360 +}
60361 +#endif
60362 +
60363 /*
60364 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60365 */
60366 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60367 make_huge_pte(vma, new_page, 1));
60368 page_remove_rmap(old_page);
60369 hugepage_add_new_anon_rmap(new_page, vma, address);
60370 +
60371 +#ifdef CONFIG_PAX_SEGMEXEC
60372 + pax_mirror_huge_pte(vma, address, new_page);
60373 +#endif
60374 +
60375 /* Make the old page be freed below */
60376 new_page = old_page;
60377 mmu_notifier_invalidate_range_end(mm,
60378 @@ -2591,6 +2617,10 @@ retry:
60379 && (vma->vm_flags & VM_SHARED)));
60380 set_huge_pte_at(mm, address, ptep, new_pte);
60381
60382 +#ifdef CONFIG_PAX_SEGMEXEC
60383 + pax_mirror_huge_pte(vma, address, page);
60384 +#endif
60385 +
60386 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60387 /* Optimization, do the COW without a second fault */
60388 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60389 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60390 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60391 struct hstate *h = hstate_vma(vma);
60392
60393 +#ifdef CONFIG_PAX_SEGMEXEC
60394 + struct vm_area_struct *vma_m;
60395 +#endif
60396 +
60397 ptep = huge_pte_offset(mm, address);
60398 if (ptep) {
60399 entry = huge_ptep_get(ptep);
60400 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60401 VM_FAULT_SET_HINDEX(h - hstates);
60402 }
60403
60404 +#ifdef CONFIG_PAX_SEGMEXEC
60405 + vma_m = pax_find_mirror_vma(vma);
60406 + if (vma_m) {
60407 + unsigned long address_m;
60408 +
60409 + if (vma->vm_start > vma_m->vm_start) {
60410 + address_m = address;
60411 + address -= SEGMEXEC_TASK_SIZE;
60412 + vma = vma_m;
60413 + h = hstate_vma(vma);
60414 + } else
60415 + address_m = address + SEGMEXEC_TASK_SIZE;
60416 +
60417 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60418 + return VM_FAULT_OOM;
60419 + address_m &= HPAGE_MASK;
60420 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60421 + }
60422 +#endif
60423 +
60424 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60425 if (!ptep)
60426 return VM_FAULT_OOM;
60427 diff -urNp linux-3.0.3/mm/internal.h linux-3.0.3/mm/internal.h
60428 --- linux-3.0.3/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60429 +++ linux-3.0.3/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60430 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60431 * in mm/page_alloc.c
60432 */
60433 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60434 +extern void free_compound_page(struct page *page);
60435 extern void prep_compound_page(struct page *page, unsigned long order);
60436 #ifdef CONFIG_MEMORY_FAILURE
60437 extern bool is_free_buddy_page(struct page *page);
60438 diff -urNp linux-3.0.3/mm/Kconfig linux-3.0.3/mm/Kconfig
60439 --- linux-3.0.3/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60440 +++ linux-3.0.3/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60441 @@ -240,7 +240,7 @@ config KSM
60442 config DEFAULT_MMAP_MIN_ADDR
60443 int "Low address space to protect from user allocation"
60444 depends on MMU
60445 - default 4096
60446 + default 65536
60447 help
60448 This is the portion of low virtual memory which should be protected
60449 from userspace allocation. Keeping a user from writing to low pages
60450 diff -urNp linux-3.0.3/mm/kmemleak.c linux-3.0.3/mm/kmemleak.c
60451 --- linux-3.0.3/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60452 +++ linux-3.0.3/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60453 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60454
60455 for (i = 0; i < object->trace_len; i++) {
60456 void *ptr = (void *)object->trace[i];
60457 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60458 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60459 }
60460 }
60461
60462 diff -urNp linux-3.0.3/mm/madvise.c linux-3.0.3/mm/madvise.c
60463 --- linux-3.0.3/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60464 +++ linux-3.0.3/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60465 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60466 pgoff_t pgoff;
60467 unsigned long new_flags = vma->vm_flags;
60468
60469 +#ifdef CONFIG_PAX_SEGMEXEC
60470 + struct vm_area_struct *vma_m;
60471 +#endif
60472 +
60473 switch (behavior) {
60474 case MADV_NORMAL:
60475 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60476 @@ -110,6 +114,13 @@ success:
60477 /*
60478 * vm_flags is protected by the mmap_sem held in write mode.
60479 */
60480 +
60481 +#ifdef CONFIG_PAX_SEGMEXEC
60482 + vma_m = pax_find_mirror_vma(vma);
60483 + if (vma_m)
60484 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60485 +#endif
60486 +
60487 vma->vm_flags = new_flags;
60488
60489 out:
60490 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60491 struct vm_area_struct ** prev,
60492 unsigned long start, unsigned long end)
60493 {
60494 +
60495 +#ifdef CONFIG_PAX_SEGMEXEC
60496 + struct vm_area_struct *vma_m;
60497 +#endif
60498 +
60499 *prev = vma;
60500 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60501 return -EINVAL;
60502 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60503 zap_page_range(vma, start, end - start, &details);
60504 } else
60505 zap_page_range(vma, start, end - start, NULL);
60506 +
60507 +#ifdef CONFIG_PAX_SEGMEXEC
60508 + vma_m = pax_find_mirror_vma(vma);
60509 + if (vma_m) {
60510 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60511 + struct zap_details details = {
60512 + .nonlinear_vma = vma_m,
60513 + .last_index = ULONG_MAX,
60514 + };
60515 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60516 + } else
60517 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60518 + }
60519 +#endif
60520 +
60521 return 0;
60522 }
60523
60524 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60525 if (end < start)
60526 goto out;
60527
60528 +#ifdef CONFIG_PAX_SEGMEXEC
60529 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60530 + if (end > SEGMEXEC_TASK_SIZE)
60531 + goto out;
60532 + } else
60533 +#endif
60534 +
60535 + if (end > TASK_SIZE)
60536 + goto out;
60537 +
60538 error = 0;
60539 if (end == start)
60540 goto out;
60541 diff -urNp linux-3.0.3/mm/memory.c linux-3.0.3/mm/memory.c
60542 --- linux-3.0.3/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60543 +++ linux-3.0.3/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60544 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60545 return;
60546
60547 pmd = pmd_offset(pud, start);
60548 +
60549 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60550 pud_clear(pud);
60551 pmd_free_tlb(tlb, pmd, start);
60552 +#endif
60553 +
60554 }
60555
60556 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60557 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60558 if (end - 1 > ceiling - 1)
60559 return;
60560
60561 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60562 pud = pud_offset(pgd, start);
60563 pgd_clear(pgd);
60564 pud_free_tlb(tlb, pud, start);
60565 +#endif
60566 +
60567 }
60568
60569 /*
60570 @@ -1577,12 +1584,6 @@ no_page_table:
60571 return page;
60572 }
60573
60574 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60575 -{
60576 - return stack_guard_page_start(vma, addr) ||
60577 - stack_guard_page_end(vma, addr+PAGE_SIZE);
60578 -}
60579 -
60580 /**
60581 * __get_user_pages() - pin user pages in memory
60582 * @tsk: task_struct of target task
60583 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60584 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60585 i = 0;
60586
60587 - do {
60588 + while (nr_pages) {
60589 struct vm_area_struct *vma;
60590
60591 - vma = find_extend_vma(mm, start);
60592 + vma = find_vma(mm, start);
60593 if (!vma && in_gate_area(mm, start)) {
60594 unsigned long pg = start & PAGE_MASK;
60595 pgd_t *pgd;
60596 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60597 goto next_page;
60598 }
60599
60600 - if (!vma ||
60601 + if (!vma || start < vma->vm_start ||
60602 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60603 !(vm_flags & vma->vm_flags))
60604 return i ? : -EFAULT;
60605 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60606 int ret;
60607 unsigned int fault_flags = 0;
60608
60609 - /* For mlock, just skip the stack guard page. */
60610 - if (foll_flags & FOLL_MLOCK) {
60611 - if (stack_guard_page(vma, start))
60612 - goto next_page;
60613 - }
60614 if (foll_flags & FOLL_WRITE)
60615 fault_flags |= FAULT_FLAG_WRITE;
60616 if (nonblocking)
60617 @@ -1811,7 +1807,7 @@ next_page:
60618 start += PAGE_SIZE;
60619 nr_pages--;
60620 } while (nr_pages && start < vma->vm_end);
60621 - } while (nr_pages);
60622 + }
60623 return i;
60624 }
60625 EXPORT_SYMBOL(__get_user_pages);
60626 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60627 page_add_file_rmap(page);
60628 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60629
60630 +#ifdef CONFIG_PAX_SEGMEXEC
60631 + pax_mirror_file_pte(vma, addr, page, ptl);
60632 +#endif
60633 +
60634 retval = 0;
60635 pte_unmap_unlock(pte, ptl);
60636 return retval;
60637 @@ -2052,10 +2052,22 @@ out:
60638 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60639 struct page *page)
60640 {
60641 +
60642 +#ifdef CONFIG_PAX_SEGMEXEC
60643 + struct vm_area_struct *vma_m;
60644 +#endif
60645 +
60646 if (addr < vma->vm_start || addr >= vma->vm_end)
60647 return -EFAULT;
60648 if (!page_count(page))
60649 return -EINVAL;
60650 +
60651 +#ifdef CONFIG_PAX_SEGMEXEC
60652 + vma_m = pax_find_mirror_vma(vma);
60653 + if (vma_m)
60654 + vma_m->vm_flags |= VM_INSERTPAGE;
60655 +#endif
60656 +
60657 vma->vm_flags |= VM_INSERTPAGE;
60658 return insert_page(vma, addr, page, vma->vm_page_prot);
60659 }
60660 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
60661 unsigned long pfn)
60662 {
60663 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
60664 + BUG_ON(vma->vm_mirror);
60665
60666 if (addr < vma->vm_start || addr >= vma->vm_end)
60667 return -EFAULT;
60668 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
60669 copy_user_highpage(dst, src, va, vma);
60670 }
60671
60672 +#ifdef CONFIG_PAX_SEGMEXEC
60673 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
60674 +{
60675 + struct mm_struct *mm = vma->vm_mm;
60676 + spinlock_t *ptl;
60677 + pte_t *pte, entry;
60678 +
60679 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
60680 + entry = *pte;
60681 + if (!pte_present(entry)) {
60682 + if (!pte_none(entry)) {
60683 + BUG_ON(pte_file(entry));
60684 + free_swap_and_cache(pte_to_swp_entry(entry));
60685 + pte_clear_not_present_full(mm, address, pte, 0);
60686 + }
60687 + } else {
60688 + struct page *page;
60689 +
60690 + flush_cache_page(vma, address, pte_pfn(entry));
60691 + entry = ptep_clear_flush(vma, address, pte);
60692 + BUG_ON(pte_dirty(entry));
60693 + page = vm_normal_page(vma, address, entry);
60694 + if (page) {
60695 + update_hiwater_rss(mm);
60696 + if (PageAnon(page))
60697 + dec_mm_counter_fast(mm, MM_ANONPAGES);
60698 + else
60699 + dec_mm_counter_fast(mm, MM_FILEPAGES);
60700 + page_remove_rmap(page);
60701 + page_cache_release(page);
60702 + }
60703 + }
60704 + pte_unmap_unlock(pte, ptl);
60705 +}
60706 +
60707 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
60708 + *
60709 + * the ptl of the lower mapped page is held on entry and is not released on exit
60710 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
60711 + */
60712 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60713 +{
60714 + struct mm_struct *mm = vma->vm_mm;
60715 + unsigned long address_m;
60716 + spinlock_t *ptl_m;
60717 + struct vm_area_struct *vma_m;
60718 + pmd_t *pmd_m;
60719 + pte_t *pte_m, entry_m;
60720 +
60721 + BUG_ON(!page_m || !PageAnon(page_m));
60722 +
60723 + vma_m = pax_find_mirror_vma(vma);
60724 + if (!vma_m)
60725 + return;
60726 +
60727 + BUG_ON(!PageLocked(page_m));
60728 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60729 + address_m = address + SEGMEXEC_TASK_SIZE;
60730 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
60731 + pte_m = pte_offset_map(pmd_m, address_m);
60732 + ptl_m = pte_lockptr(mm, pmd_m);
60733 + if (ptl != ptl_m) {
60734 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
60735 + if (!pte_none(*pte_m))
60736 + goto out;
60737 + }
60738 +
60739 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
60740 + page_cache_get(page_m);
60741 + page_add_anon_rmap(page_m, vma_m, address_m);
60742 + inc_mm_counter_fast(mm, MM_ANONPAGES);
60743 + set_pte_at(mm, address_m, pte_m, entry_m);
60744 + update_mmu_cache(vma_m, address_m, entry_m);
60745 +out:
60746 + if (ptl != ptl_m)
60747 + spin_unlock(ptl_m);
60748 + pte_unmap(pte_m);
60749 + unlock_page(page_m);
60750 +}
60751 +
60752 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60753 +{
60754 + struct mm_struct *mm = vma->vm_mm;
60755 + unsigned long address_m;
60756 + spinlock_t *ptl_m;
60757 + struct vm_area_struct *vma_m;
60758 + pmd_t *pmd_m;
60759 + pte_t *pte_m, entry_m;
60760 +
60761 + BUG_ON(!page_m || PageAnon(page_m));
60762 +
60763 + vma_m = pax_find_mirror_vma(vma);
60764 + if (!vma_m)
60765 + return;
60766 +
60767 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60768 + address_m = address + SEGMEXEC_TASK_SIZE;
60769 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
60770 + pte_m = pte_offset_map(pmd_m, address_m);
60771 + ptl_m = pte_lockptr(mm, pmd_m);
60772 + if (ptl != ptl_m) {
60773 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
60774 + if (!pte_none(*pte_m))
60775 + goto out;
60776 + }
60777 +
60778 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
60779 + page_cache_get(page_m);
60780 + page_add_file_rmap(page_m);
60781 + inc_mm_counter_fast(mm, MM_FILEPAGES);
60782 + set_pte_at(mm, address_m, pte_m, entry_m);
60783 + update_mmu_cache(vma_m, address_m, entry_m);
60784 +out:
60785 + if (ptl != ptl_m)
60786 + spin_unlock(ptl_m);
60787 + pte_unmap(pte_m);
60788 +}
60789 +
60790 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
60791 +{
60792 + struct mm_struct *mm = vma->vm_mm;
60793 + unsigned long address_m;
60794 + spinlock_t *ptl_m;
60795 + struct vm_area_struct *vma_m;
60796 + pmd_t *pmd_m;
60797 + pte_t *pte_m, entry_m;
60798 +
60799 + vma_m = pax_find_mirror_vma(vma);
60800 + if (!vma_m)
60801 + return;
60802 +
60803 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60804 + address_m = address + SEGMEXEC_TASK_SIZE;
60805 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
60806 + pte_m = pte_offset_map(pmd_m, address_m);
60807 + ptl_m = pte_lockptr(mm, pmd_m);
60808 + if (ptl != ptl_m) {
60809 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
60810 + if (!pte_none(*pte_m))
60811 + goto out;
60812 + }
60813 +
60814 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
60815 + set_pte_at(mm, address_m, pte_m, entry_m);
60816 +out:
60817 + if (ptl != ptl_m)
60818 + spin_unlock(ptl_m);
60819 + pte_unmap(pte_m);
60820 +}
60821 +
60822 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
60823 +{
60824 + struct page *page_m;
60825 + pte_t entry;
60826 +
60827 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
60828 + goto out;
60829 +
60830 + entry = *pte;
60831 + page_m = vm_normal_page(vma, address, entry);
60832 + if (!page_m)
60833 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
60834 + else if (PageAnon(page_m)) {
60835 + if (pax_find_mirror_vma(vma)) {
60836 + pte_unmap_unlock(pte, ptl);
60837 + lock_page(page_m);
60838 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
60839 + if (pte_same(entry, *pte))
60840 + pax_mirror_anon_pte(vma, address, page_m, ptl);
60841 + else
60842 + unlock_page(page_m);
60843 + }
60844 + } else
60845 + pax_mirror_file_pte(vma, address, page_m, ptl);
60846 +
60847 +out:
60848 + pte_unmap_unlock(pte, ptl);
60849 +}
60850 +#endif
60851 +
60852 /*
60853 * This routine handles present pages, when users try to write
60854 * to a shared page. It is done by copying the page to a new address
60855 @@ -2667,6 +2860,12 @@ gotten:
60856 */
60857 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
60858 if (likely(pte_same(*page_table, orig_pte))) {
60859 +
60860 +#ifdef CONFIG_PAX_SEGMEXEC
60861 + if (pax_find_mirror_vma(vma))
60862 + BUG_ON(!trylock_page(new_page));
60863 +#endif
60864 +
60865 if (old_page) {
60866 if (!PageAnon(old_page)) {
60867 dec_mm_counter_fast(mm, MM_FILEPAGES);
60868 @@ -2718,6 +2917,10 @@ gotten:
60869 page_remove_rmap(old_page);
60870 }
60871
60872 +#ifdef CONFIG_PAX_SEGMEXEC
60873 + pax_mirror_anon_pte(vma, address, new_page, ptl);
60874 +#endif
60875 +
60876 /* Free the old page.. */
60877 new_page = old_page;
60878 ret |= VM_FAULT_WRITE;
60879 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
60880 swap_free(entry);
60881 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
60882 try_to_free_swap(page);
60883 +
60884 +#ifdef CONFIG_PAX_SEGMEXEC
60885 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
60886 +#endif
60887 +
60888 unlock_page(page);
60889 if (swapcache) {
60890 /*
60891 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
60892
60893 /* No need to invalidate - it was non-present before */
60894 update_mmu_cache(vma, address, page_table);
60895 +
60896 +#ifdef CONFIG_PAX_SEGMEXEC
60897 + pax_mirror_anon_pte(vma, address, page, ptl);
60898 +#endif
60899 +
60900 unlock:
60901 pte_unmap_unlock(page_table, ptl);
60902 out:
60903 @@ -3039,40 +3252,6 @@ out_release:
60904 }
60905
60906 /*
60907 - * This is like a special single-page "expand_{down|up}wards()",
60908 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
60909 - * doesn't hit another vma.
60910 - */
60911 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
60912 -{
60913 - address &= PAGE_MASK;
60914 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
60915 - struct vm_area_struct *prev = vma->vm_prev;
60916 -
60917 - /*
60918 - * Is there a mapping abutting this one below?
60919 - *
60920 - * That's only ok if it's the same stack mapping
60921 - * that has gotten split..
60922 - */
60923 - if (prev && prev->vm_end == address)
60924 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
60925 -
60926 - expand_downwards(vma, address - PAGE_SIZE);
60927 - }
60928 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
60929 - struct vm_area_struct *next = vma->vm_next;
60930 -
60931 - /* As VM_GROWSDOWN but s/below/above/ */
60932 - if (next && next->vm_start == address + PAGE_SIZE)
60933 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
60934 -
60935 - expand_upwards(vma, address + PAGE_SIZE);
60936 - }
60937 - return 0;
60938 -}
60939 -
60940 -/*
60941 * We enter with non-exclusive mmap_sem (to exclude vma changes,
60942 * but allow concurrent faults), and pte mapped but not yet locked.
60943 * We return with mmap_sem still held, but pte unmapped and unlocked.
60944 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
60945 unsigned long address, pte_t *page_table, pmd_t *pmd,
60946 unsigned int flags)
60947 {
60948 - struct page *page;
60949 + struct page *page = NULL;
60950 spinlock_t *ptl;
60951 pte_t entry;
60952
60953 - pte_unmap(page_table);
60954 -
60955 - /* Check if we need to add a guard page to the stack */
60956 - if (check_stack_guard_page(vma, address) < 0)
60957 - return VM_FAULT_SIGBUS;
60958 -
60959 - /* Use the zero-page for reads */
60960 if (!(flags & FAULT_FLAG_WRITE)) {
60961 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
60962 vma->vm_page_prot));
60963 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
60964 + ptl = pte_lockptr(mm, pmd);
60965 + spin_lock(ptl);
60966 if (!pte_none(*page_table))
60967 goto unlock;
60968 goto setpte;
60969 }
60970
60971 /* Allocate our own private page. */
60972 + pte_unmap(page_table);
60973 +
60974 if (unlikely(anon_vma_prepare(vma)))
60975 goto oom;
60976 page = alloc_zeroed_user_highpage_movable(vma, address);
60977 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
60978 if (!pte_none(*page_table))
60979 goto release;
60980
60981 +#ifdef CONFIG_PAX_SEGMEXEC
60982 + if (pax_find_mirror_vma(vma))
60983 + BUG_ON(!trylock_page(page));
60984 +#endif
60985 +
60986 inc_mm_counter_fast(mm, MM_ANONPAGES);
60987 page_add_new_anon_rmap(page, vma, address);
60988 setpte:
60989 @@ -3127,6 +3307,12 @@ setpte:
60990
60991 /* No need to invalidate - it was non-present before */
60992 update_mmu_cache(vma, address, page_table);
60993 +
60994 +#ifdef CONFIG_PAX_SEGMEXEC
60995 + if (page)
60996 + pax_mirror_anon_pte(vma, address, page, ptl);
60997 +#endif
60998 +
60999 unlock:
61000 pte_unmap_unlock(page_table, ptl);
61001 return 0;
61002 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61003 */
61004 /* Only go through if we didn't race with anybody else... */
61005 if (likely(pte_same(*page_table, orig_pte))) {
61006 +
61007 +#ifdef CONFIG_PAX_SEGMEXEC
61008 + if (anon && pax_find_mirror_vma(vma))
61009 + BUG_ON(!trylock_page(page));
61010 +#endif
61011 +
61012 flush_icache_page(vma, page);
61013 entry = mk_pte(page, vma->vm_page_prot);
61014 if (flags & FAULT_FLAG_WRITE)
61015 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61016
61017 /* no need to invalidate: a not-present page won't be cached */
61018 update_mmu_cache(vma, address, page_table);
61019 +
61020 +#ifdef CONFIG_PAX_SEGMEXEC
61021 + if (anon)
61022 + pax_mirror_anon_pte(vma, address, page, ptl);
61023 + else
61024 + pax_mirror_file_pte(vma, address, page, ptl);
61025 +#endif
61026 +
61027 } else {
61028 if (charged)
61029 mem_cgroup_uncharge_page(page);
61030 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61031 if (flags & FAULT_FLAG_WRITE)
61032 flush_tlb_fix_spurious_fault(vma, address);
61033 }
61034 +
61035 +#ifdef CONFIG_PAX_SEGMEXEC
61036 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61037 + return 0;
61038 +#endif
61039 +
61040 unlock:
61041 pte_unmap_unlock(pte, ptl);
61042 return 0;
61043 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61044 pmd_t *pmd;
61045 pte_t *pte;
61046
61047 +#ifdef CONFIG_PAX_SEGMEXEC
61048 + struct vm_area_struct *vma_m;
61049 +#endif
61050 +
61051 __set_current_state(TASK_RUNNING);
61052
61053 count_vm_event(PGFAULT);
61054 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61055 if (unlikely(is_vm_hugetlb_page(vma)))
61056 return hugetlb_fault(mm, vma, address, flags);
61057
61058 +#ifdef CONFIG_PAX_SEGMEXEC
61059 + vma_m = pax_find_mirror_vma(vma);
61060 + if (vma_m) {
61061 + unsigned long address_m;
61062 + pgd_t *pgd_m;
61063 + pud_t *pud_m;
61064 + pmd_t *pmd_m;
61065 +
61066 + if (vma->vm_start > vma_m->vm_start) {
61067 + address_m = address;
61068 + address -= SEGMEXEC_TASK_SIZE;
61069 + vma = vma_m;
61070 + } else
61071 + address_m = address + SEGMEXEC_TASK_SIZE;
61072 +
61073 + pgd_m = pgd_offset(mm, address_m);
61074 + pud_m = pud_alloc(mm, pgd_m, address_m);
61075 + if (!pud_m)
61076 + return VM_FAULT_OOM;
61077 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61078 + if (!pmd_m)
61079 + return VM_FAULT_OOM;
61080 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61081 + return VM_FAULT_OOM;
61082 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61083 + }
61084 +#endif
61085 +
61086 pgd = pgd_offset(mm, address);
61087 pud = pud_alloc(mm, pgd, address);
61088 if (!pud)
61089 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61090 * run pte_offset_map on the pmd, if an huge pmd could
61091 * materialize from under us from a different thread.
61092 */
61093 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61094 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61095 return VM_FAULT_OOM;
61096 /* if an huge pmd materialized from under us just retry later */
61097 if (unlikely(pmd_trans_huge(*pmd)))
61098 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61099 gate_vma.vm_start = FIXADDR_USER_START;
61100 gate_vma.vm_end = FIXADDR_USER_END;
61101 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61102 - gate_vma.vm_page_prot = __P101;
61103 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61104 /*
61105 * Make sure the vDSO gets into every core dump.
61106 * Dumping its contents makes post-mortem fully interpretable later
61107 diff -urNp linux-3.0.3/mm/memory-failure.c linux-3.0.3/mm/memory-failure.c
61108 --- linux-3.0.3/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61109 +++ linux-3.0.3/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61110 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61111
61112 int sysctl_memory_failure_recovery __read_mostly = 1;
61113
61114 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61115 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61116
61117 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61118
61119 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61120 }
61121
61122 nr_pages = 1 << compound_trans_order(hpage);
61123 - atomic_long_add(nr_pages, &mce_bad_pages);
61124 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61125
61126 /*
61127 * We need/can do nothing about count=0 pages.
61128 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61129 if (!PageHWPoison(hpage)
61130 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61131 || (p != hpage && TestSetPageHWPoison(hpage))) {
61132 - atomic_long_sub(nr_pages, &mce_bad_pages);
61133 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61134 return 0;
61135 }
61136 set_page_hwpoison_huge_page(hpage);
61137 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61138 }
61139 if (hwpoison_filter(p)) {
61140 if (TestClearPageHWPoison(p))
61141 - atomic_long_sub(nr_pages, &mce_bad_pages);
61142 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61143 unlock_page(hpage);
61144 put_page(hpage);
61145 return 0;
61146 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61147 return 0;
61148 }
61149 if (TestClearPageHWPoison(p))
61150 - atomic_long_sub(nr_pages, &mce_bad_pages);
61151 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61152 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61153 return 0;
61154 }
61155 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61156 */
61157 if (TestClearPageHWPoison(page)) {
61158 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61159 - atomic_long_sub(nr_pages, &mce_bad_pages);
61160 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61161 freeit = 1;
61162 if (PageHuge(page))
61163 clear_page_hwpoison_huge_page(page);
61164 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61165 }
61166 done:
61167 if (!PageHWPoison(hpage))
61168 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61169 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61170 set_page_hwpoison_huge_page(hpage);
61171 dequeue_hwpoisoned_huge_page(hpage);
61172 /* keep elevated page count for bad page */
61173 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61174 return ret;
61175
61176 done:
61177 - atomic_long_add(1, &mce_bad_pages);
61178 + atomic_long_add_unchecked(1, &mce_bad_pages);
61179 SetPageHWPoison(page);
61180 /* keep elevated page count for bad page */
61181 return ret;
61182 diff -urNp linux-3.0.3/mm/mempolicy.c linux-3.0.3/mm/mempolicy.c
61183 --- linux-3.0.3/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61184 +++ linux-3.0.3/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61185 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61186 unsigned long vmstart;
61187 unsigned long vmend;
61188
61189 +#ifdef CONFIG_PAX_SEGMEXEC
61190 + struct vm_area_struct *vma_m;
61191 +#endif
61192 +
61193 vma = find_vma_prev(mm, start, &prev);
61194 if (!vma || vma->vm_start > start)
61195 return -EFAULT;
61196 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61197 err = policy_vma(vma, new_pol);
61198 if (err)
61199 goto out;
61200 +
61201 +#ifdef CONFIG_PAX_SEGMEXEC
61202 + vma_m = pax_find_mirror_vma(vma);
61203 + if (vma_m) {
61204 + err = policy_vma(vma_m, new_pol);
61205 + if (err)
61206 + goto out;
61207 + }
61208 +#endif
61209 +
61210 }
61211
61212 out:
61213 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61214
61215 if (end < start)
61216 return -EINVAL;
61217 +
61218 +#ifdef CONFIG_PAX_SEGMEXEC
61219 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61220 + if (end > SEGMEXEC_TASK_SIZE)
61221 + return -EINVAL;
61222 + } else
61223 +#endif
61224 +
61225 + if (end > TASK_SIZE)
61226 + return -EINVAL;
61227 +
61228 if (end == start)
61229 return 0;
61230
61231 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61232 if (!mm)
61233 goto out;
61234
61235 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61236 + if (mm != current->mm &&
61237 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61238 + err = -EPERM;
61239 + goto out;
61240 + }
61241 +#endif
61242 +
61243 /*
61244 * Check if this process has the right to modify the specified
61245 * process. The right exists if the process has administrative
61246 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61247 rcu_read_lock();
61248 tcred = __task_cred(task);
61249 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61250 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61251 - !capable(CAP_SYS_NICE)) {
61252 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61253 rcu_read_unlock();
61254 err = -EPERM;
61255 goto out;
61256 diff -urNp linux-3.0.3/mm/migrate.c linux-3.0.3/mm/migrate.c
61257 --- linux-3.0.3/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61258 +++ linux-3.0.3/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61259 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61260 unsigned long chunk_start;
61261 int err;
61262
61263 + pax_track_stack();
61264 +
61265 task_nodes = cpuset_mems_allowed(task);
61266
61267 err = -ENOMEM;
61268 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61269 if (!mm)
61270 return -EINVAL;
61271
61272 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61273 + if (mm != current->mm &&
61274 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61275 + err = -EPERM;
61276 + goto out;
61277 + }
61278 +#endif
61279 +
61280 /*
61281 * Check if this process has the right to modify the specified
61282 * process. The right exists if the process has administrative
61283 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61284 rcu_read_lock();
61285 tcred = __task_cred(task);
61286 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61287 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61288 - !capable(CAP_SYS_NICE)) {
61289 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61290 rcu_read_unlock();
61291 err = -EPERM;
61292 goto out;
61293 diff -urNp linux-3.0.3/mm/mlock.c linux-3.0.3/mm/mlock.c
61294 --- linux-3.0.3/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61295 +++ linux-3.0.3/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61296 @@ -13,6 +13,7 @@
61297 #include <linux/pagemap.h>
61298 #include <linux/mempolicy.h>
61299 #include <linux/syscalls.h>
61300 +#include <linux/security.h>
61301 #include <linux/sched.h>
61302 #include <linux/module.h>
61303 #include <linux/rmap.h>
61304 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61305 return -EINVAL;
61306 if (end == start)
61307 return 0;
61308 + if (end > TASK_SIZE)
61309 + return -EINVAL;
61310 +
61311 vma = find_vma_prev(current->mm, start, &prev);
61312 if (!vma || vma->vm_start > start)
61313 return -ENOMEM;
61314 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61315 for (nstart = start ; ; ) {
61316 vm_flags_t newflags;
61317
61318 +#ifdef CONFIG_PAX_SEGMEXEC
61319 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61320 + break;
61321 +#endif
61322 +
61323 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61324
61325 newflags = vma->vm_flags | VM_LOCKED;
61326 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61327 lock_limit >>= PAGE_SHIFT;
61328
61329 /* check against resource limits */
61330 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61331 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61332 error = do_mlock(start, len, 1);
61333 up_write(&current->mm->mmap_sem);
61334 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61335 static int do_mlockall(int flags)
61336 {
61337 struct vm_area_struct * vma, * prev = NULL;
61338 - unsigned int def_flags = 0;
61339
61340 if (flags & MCL_FUTURE)
61341 - def_flags = VM_LOCKED;
61342 - current->mm->def_flags = def_flags;
61343 + current->mm->def_flags |= VM_LOCKED;
61344 + else
61345 + current->mm->def_flags &= ~VM_LOCKED;
61346 if (flags == MCL_FUTURE)
61347 goto out;
61348
61349 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61350 vm_flags_t newflags;
61351
61352 +#ifdef CONFIG_PAX_SEGMEXEC
61353 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61354 + break;
61355 +#endif
61356 +
61357 + BUG_ON(vma->vm_end > TASK_SIZE);
61358 newflags = vma->vm_flags | VM_LOCKED;
61359 if (!(flags & MCL_CURRENT))
61360 newflags &= ~VM_LOCKED;
61361 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61362 lock_limit >>= PAGE_SHIFT;
61363
61364 ret = -ENOMEM;
61365 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61366 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61367 capable(CAP_IPC_LOCK))
61368 ret = do_mlockall(flags);
61369 diff -urNp linux-3.0.3/mm/mmap.c linux-3.0.3/mm/mmap.c
61370 --- linux-3.0.3/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61371 +++ linux-3.0.3/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61372 @@ -46,6 +46,16 @@
61373 #define arch_rebalance_pgtables(addr, len) (addr)
61374 #endif
61375
61376 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61377 +{
61378 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61379 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61380 + up_read(&mm->mmap_sem);
61381 + BUG();
61382 + }
61383 +#endif
61384 +}
61385 +
61386 static void unmap_region(struct mm_struct *mm,
61387 struct vm_area_struct *vma, struct vm_area_struct *prev,
61388 unsigned long start, unsigned long end);
61389 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61390 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61391 *
61392 */
61393 -pgprot_t protection_map[16] = {
61394 +pgprot_t protection_map[16] __read_only = {
61395 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61396 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61397 };
61398
61399 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
61400 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61401 {
61402 - return __pgprot(pgprot_val(protection_map[vm_flags &
61403 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61404 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61405 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61406 +
61407 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61408 + if (!(__supported_pte_mask & _PAGE_NX) &&
61409 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61410 + (vm_flags & (VM_READ | VM_WRITE)))
61411 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61412 +#endif
61413 +
61414 + return prot;
61415 }
61416 EXPORT_SYMBOL(vm_get_page_prot);
61417
61418 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61419 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61420 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61421 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61422 /*
61423 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61424 * other variables. It can be updated by several CPUs frequently.
61425 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61426 struct vm_area_struct *next = vma->vm_next;
61427
61428 might_sleep();
61429 + BUG_ON(vma->vm_mirror);
61430 if (vma->vm_ops && vma->vm_ops->close)
61431 vma->vm_ops->close(vma);
61432 if (vma->vm_file) {
61433 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61434 * not page aligned -Ram Gupta
61435 */
61436 rlim = rlimit(RLIMIT_DATA);
61437 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61438 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61439 (mm->end_data - mm->start_data) > rlim)
61440 goto out;
61441 @@ -697,6 +719,12 @@ static int
61442 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61443 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61444 {
61445 +
61446 +#ifdef CONFIG_PAX_SEGMEXEC
61447 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61448 + return 0;
61449 +#endif
61450 +
61451 if (is_mergeable_vma(vma, file, vm_flags) &&
61452 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61453 if (vma->vm_pgoff == vm_pgoff)
61454 @@ -716,6 +744,12 @@ static int
61455 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61456 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61457 {
61458 +
61459 +#ifdef CONFIG_PAX_SEGMEXEC
61460 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61461 + return 0;
61462 +#endif
61463 +
61464 if (is_mergeable_vma(vma, file, vm_flags) &&
61465 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61466 pgoff_t vm_pglen;
61467 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61468 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61469 struct vm_area_struct *prev, unsigned long addr,
61470 unsigned long end, unsigned long vm_flags,
61471 - struct anon_vma *anon_vma, struct file *file,
61472 + struct anon_vma *anon_vma, struct file *file,
61473 pgoff_t pgoff, struct mempolicy *policy)
61474 {
61475 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61476 struct vm_area_struct *area, *next;
61477 int err;
61478
61479 +#ifdef CONFIG_PAX_SEGMEXEC
61480 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61481 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61482 +
61483 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61484 +#endif
61485 +
61486 /*
61487 * We later require that vma->vm_flags == vm_flags,
61488 * so this tests vma->vm_flags & VM_SPECIAL, too.
61489 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61490 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61491 next = next->vm_next;
61492
61493 +#ifdef CONFIG_PAX_SEGMEXEC
61494 + if (prev)
61495 + prev_m = pax_find_mirror_vma(prev);
61496 + if (area)
61497 + area_m = pax_find_mirror_vma(area);
61498 + if (next)
61499 + next_m = pax_find_mirror_vma(next);
61500 +#endif
61501 +
61502 /*
61503 * Can it merge with the predecessor?
61504 */
61505 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61506 /* cases 1, 6 */
61507 err = vma_adjust(prev, prev->vm_start,
61508 next->vm_end, prev->vm_pgoff, NULL);
61509 - } else /* cases 2, 5, 7 */
61510 +
61511 +#ifdef CONFIG_PAX_SEGMEXEC
61512 + if (!err && prev_m)
61513 + err = vma_adjust(prev_m, prev_m->vm_start,
61514 + next_m->vm_end, prev_m->vm_pgoff, NULL);
61515 +#endif
61516 +
61517 + } else { /* cases 2, 5, 7 */
61518 err = vma_adjust(prev, prev->vm_start,
61519 end, prev->vm_pgoff, NULL);
61520 +
61521 +#ifdef CONFIG_PAX_SEGMEXEC
61522 + if (!err && prev_m)
61523 + err = vma_adjust(prev_m, prev_m->vm_start,
61524 + end_m, prev_m->vm_pgoff, NULL);
61525 +#endif
61526 +
61527 + }
61528 if (err)
61529 return NULL;
61530 khugepaged_enter_vma_merge(prev);
61531 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61532 mpol_equal(policy, vma_policy(next)) &&
61533 can_vma_merge_before(next, vm_flags,
61534 anon_vma, file, pgoff+pglen)) {
61535 - if (prev && addr < prev->vm_end) /* case 4 */
61536 + if (prev && addr < prev->vm_end) { /* case 4 */
61537 err = vma_adjust(prev, prev->vm_start,
61538 addr, prev->vm_pgoff, NULL);
61539 - else /* cases 3, 8 */
61540 +
61541 +#ifdef CONFIG_PAX_SEGMEXEC
61542 + if (!err && prev_m)
61543 + err = vma_adjust(prev_m, prev_m->vm_start,
61544 + addr_m, prev_m->vm_pgoff, NULL);
61545 +#endif
61546 +
61547 + } else { /* cases 3, 8 */
61548 err = vma_adjust(area, addr, next->vm_end,
61549 next->vm_pgoff - pglen, NULL);
61550 +
61551 +#ifdef CONFIG_PAX_SEGMEXEC
61552 + if (!err && area_m)
61553 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
61554 + next_m->vm_pgoff - pglen, NULL);
61555 +#endif
61556 +
61557 + }
61558 if (err)
61559 return NULL;
61560 khugepaged_enter_vma_merge(area);
61561 @@ -929,14 +1009,11 @@ none:
61562 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61563 struct file *file, long pages)
61564 {
61565 - const unsigned long stack_flags
61566 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61567 -
61568 if (file) {
61569 mm->shared_vm += pages;
61570 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61571 mm->exec_vm += pages;
61572 - } else if (flags & stack_flags)
61573 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61574 mm->stack_vm += pages;
61575 if (flags & (VM_RESERVED|VM_IO))
61576 mm->reserved_vm += pages;
61577 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61578 * (the exception is when the underlying filesystem is noexec
61579 * mounted, in which case we dont add PROT_EXEC.)
61580 */
61581 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61582 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61583 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61584 prot |= PROT_EXEC;
61585
61586 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61587 /* Obtain the address to map to. we verify (or select) it and ensure
61588 * that it represents a valid section of the address space.
61589 */
61590 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
61591 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61592 if (addr & ~PAGE_MASK)
61593 return addr;
61594
61595 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61596 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61597 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61598
61599 +#ifdef CONFIG_PAX_MPROTECT
61600 + if (mm->pax_flags & MF_PAX_MPROTECT) {
61601 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
61602 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61603 + gr_log_rwxmmap(file);
61604 +
61605 +#ifdef CONFIG_PAX_EMUPLT
61606 + vm_flags &= ~VM_EXEC;
61607 +#else
61608 + return -EPERM;
61609 +#endif
61610 +
61611 + }
61612 +
61613 + if (!(vm_flags & VM_EXEC))
61614 + vm_flags &= ~VM_MAYEXEC;
61615 +#else
61616 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61617 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61618 +#endif
61619 + else
61620 + vm_flags &= ~VM_MAYWRITE;
61621 + }
61622 +#endif
61623 +
61624 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61625 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61626 + vm_flags &= ~VM_PAGEEXEC;
61627 +#endif
61628 +
61629 if (flags & MAP_LOCKED)
61630 if (!can_do_mlock())
61631 return -EPERM;
61632 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61633 locked += mm->locked_vm;
61634 lock_limit = rlimit(RLIMIT_MEMLOCK);
61635 lock_limit >>= PAGE_SHIFT;
61636 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61637 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61638 return -EAGAIN;
61639 }
61640 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61641 if (error)
61642 return error;
61643
61644 + if (!gr_acl_handle_mmap(file, prot))
61645 + return -EACCES;
61646 +
61647 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61648 }
61649 EXPORT_SYMBOL(do_mmap_pgoff);
61650 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61651 vm_flags_t vm_flags = vma->vm_flags;
61652
61653 /* If it was private or non-writable, the write bit is already clear */
61654 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
61655 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
61656 return 0;
61657
61658 /* The backer wishes to know when pages are first written to? */
61659 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
61660 unsigned long charged = 0;
61661 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
61662
61663 +#ifdef CONFIG_PAX_SEGMEXEC
61664 + struct vm_area_struct *vma_m = NULL;
61665 +#endif
61666 +
61667 + /*
61668 + * mm->mmap_sem is required to protect against another thread
61669 + * changing the mappings in case we sleep.
61670 + */
61671 + verify_mm_writelocked(mm);
61672 +
61673 /* Clear old maps */
61674 error = -ENOMEM;
61675 -munmap_back:
61676 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61677 if (vma && vma->vm_start < addr + len) {
61678 if (do_munmap(mm, addr, len))
61679 return -ENOMEM;
61680 - goto munmap_back;
61681 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61682 + BUG_ON(vma && vma->vm_start < addr + len);
61683 }
61684
61685 /* Check against address space limit. */
61686 @@ -1266,6 +1387,16 @@ munmap_back:
61687 goto unacct_error;
61688 }
61689
61690 +#ifdef CONFIG_PAX_SEGMEXEC
61691 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
61692 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
61693 + if (!vma_m) {
61694 + error = -ENOMEM;
61695 + goto free_vma;
61696 + }
61697 + }
61698 +#endif
61699 +
61700 vma->vm_mm = mm;
61701 vma->vm_start = addr;
61702 vma->vm_end = addr + len;
61703 @@ -1289,6 +1420,19 @@ munmap_back:
61704 error = file->f_op->mmap(file, vma);
61705 if (error)
61706 goto unmap_and_free_vma;
61707 +
61708 +#ifdef CONFIG_PAX_SEGMEXEC
61709 + if (vma_m && (vm_flags & VM_EXECUTABLE))
61710 + added_exe_file_vma(mm);
61711 +#endif
61712 +
61713 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61714 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
61715 + vma->vm_flags |= VM_PAGEEXEC;
61716 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61717 + }
61718 +#endif
61719 +
61720 if (vm_flags & VM_EXECUTABLE)
61721 added_exe_file_vma(mm);
61722
61723 @@ -1324,6 +1468,11 @@ munmap_back:
61724 vma_link(mm, vma, prev, rb_link, rb_parent);
61725 file = vma->vm_file;
61726
61727 +#ifdef CONFIG_PAX_SEGMEXEC
61728 + if (vma_m)
61729 + BUG_ON(pax_mirror_vma(vma_m, vma));
61730 +#endif
61731 +
61732 /* Once vma denies write, undo our temporary denial count */
61733 if (correct_wcount)
61734 atomic_inc(&inode->i_writecount);
61735 @@ -1332,6 +1481,7 @@ out:
61736
61737 mm->total_vm += len >> PAGE_SHIFT;
61738 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
61739 + track_exec_limit(mm, addr, addr + len, vm_flags);
61740 if (vm_flags & VM_LOCKED) {
61741 if (!mlock_vma_pages_range(vma, addr, addr + len))
61742 mm->locked_vm += (len >> PAGE_SHIFT);
61743 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
61744 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
61745 charged = 0;
61746 free_vma:
61747 +
61748 +#ifdef CONFIG_PAX_SEGMEXEC
61749 + if (vma_m)
61750 + kmem_cache_free(vm_area_cachep, vma_m);
61751 +#endif
61752 +
61753 kmem_cache_free(vm_area_cachep, vma);
61754 unacct_error:
61755 if (charged)
61756 @@ -1356,6 +1512,44 @@ unacct_error:
61757 return error;
61758 }
61759
61760 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
61761 +{
61762 + if (!vma) {
61763 +#ifdef CONFIG_STACK_GROWSUP
61764 + if (addr > sysctl_heap_stack_gap)
61765 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
61766 + else
61767 + vma = find_vma(current->mm, 0);
61768 + if (vma && (vma->vm_flags & VM_GROWSUP))
61769 + return false;
61770 +#endif
61771 + return true;
61772 + }
61773 +
61774 + if (addr + len > vma->vm_start)
61775 + return false;
61776 +
61777 + if (vma->vm_flags & VM_GROWSDOWN)
61778 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
61779 +#ifdef CONFIG_STACK_GROWSUP
61780 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
61781 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
61782 +#endif
61783 +
61784 + return true;
61785 +}
61786 +
61787 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
61788 +{
61789 + if (vma->vm_start < len)
61790 + return -ENOMEM;
61791 + if (!(vma->vm_flags & VM_GROWSDOWN))
61792 + return vma->vm_start - len;
61793 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
61794 + return vma->vm_start - len - sysctl_heap_stack_gap;
61795 + return -ENOMEM;
61796 +}
61797 +
61798 /* Get an address range which is currently unmapped.
61799 * For shmat() with addr=0.
61800 *
61801 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
61802 if (flags & MAP_FIXED)
61803 return addr;
61804
61805 +#ifdef CONFIG_PAX_RANDMMAP
61806 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
61807 +#endif
61808 +
61809 if (addr) {
61810 addr = PAGE_ALIGN(addr);
61811 - vma = find_vma(mm, addr);
61812 - if (TASK_SIZE - len >= addr &&
61813 - (!vma || addr + len <= vma->vm_start))
61814 - return addr;
61815 + if (TASK_SIZE - len >= addr) {
61816 + vma = find_vma(mm, addr);
61817 + if (check_heap_stack_gap(vma, addr, len))
61818 + return addr;
61819 + }
61820 }
61821 if (len > mm->cached_hole_size) {
61822 - start_addr = addr = mm->free_area_cache;
61823 + start_addr = addr = mm->free_area_cache;
61824 } else {
61825 - start_addr = addr = TASK_UNMAPPED_BASE;
61826 - mm->cached_hole_size = 0;
61827 + start_addr = addr = mm->mmap_base;
61828 + mm->cached_hole_size = 0;
61829 }
61830
61831 full_search:
61832 @@ -1404,34 +1603,40 @@ full_search:
61833 * Start a new search - just in case we missed
61834 * some holes.
61835 */
61836 - if (start_addr != TASK_UNMAPPED_BASE) {
61837 - addr = TASK_UNMAPPED_BASE;
61838 - start_addr = addr;
61839 + if (start_addr != mm->mmap_base) {
61840 + start_addr = addr = mm->mmap_base;
61841 mm->cached_hole_size = 0;
61842 goto full_search;
61843 }
61844 return -ENOMEM;
61845 }
61846 - if (!vma || addr + len <= vma->vm_start) {
61847 - /*
61848 - * Remember the place where we stopped the search:
61849 - */
61850 - mm->free_area_cache = addr + len;
61851 - return addr;
61852 - }
61853 + if (check_heap_stack_gap(vma, addr, len))
61854 + break;
61855 if (addr + mm->cached_hole_size < vma->vm_start)
61856 mm->cached_hole_size = vma->vm_start - addr;
61857 addr = vma->vm_end;
61858 }
61859 +
61860 + /*
61861 + * Remember the place where we stopped the search:
61862 + */
61863 + mm->free_area_cache = addr + len;
61864 + return addr;
61865 }
61866 #endif
61867
61868 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
61869 {
61870 +
61871 +#ifdef CONFIG_PAX_SEGMEXEC
61872 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
61873 + return;
61874 +#endif
61875 +
61876 /*
61877 * Is this a new hole at the lowest possible address?
61878 */
61879 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
61880 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
61881 mm->free_area_cache = addr;
61882 mm->cached_hole_size = ~0UL;
61883 }
61884 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
61885 {
61886 struct vm_area_struct *vma;
61887 struct mm_struct *mm = current->mm;
61888 - unsigned long addr = addr0;
61889 + unsigned long base = mm->mmap_base, addr = addr0;
61890
61891 /* requested length too big for entire address space */
61892 if (len > TASK_SIZE)
61893 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
61894 if (flags & MAP_FIXED)
61895 return addr;
61896
61897 +#ifdef CONFIG_PAX_RANDMMAP
61898 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
61899 +#endif
61900 +
61901 /* requesting a specific address */
61902 if (addr) {
61903 addr = PAGE_ALIGN(addr);
61904 - vma = find_vma(mm, addr);
61905 - if (TASK_SIZE - len >= addr &&
61906 - (!vma || addr + len <= vma->vm_start))
61907 - return addr;
61908 + if (TASK_SIZE - len >= addr) {
61909 + vma = find_vma(mm, addr);
61910 + if (check_heap_stack_gap(vma, addr, len))
61911 + return addr;
61912 + }
61913 }
61914
61915 /* check if free_area_cache is useful for us */
61916 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
61917 /* make sure it can fit in the remaining address space */
61918 if (addr > len) {
61919 vma = find_vma(mm, addr-len);
61920 - if (!vma || addr <= vma->vm_start)
61921 + if (check_heap_stack_gap(vma, addr - len, len))
61922 /* remember the address as a hint for next time */
61923 return (mm->free_area_cache = addr-len);
61924 }
61925 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
61926 * return with success:
61927 */
61928 vma = find_vma(mm, addr);
61929 - if (!vma || addr+len <= vma->vm_start)
61930 + if (check_heap_stack_gap(vma, addr, len))
61931 /* remember the address as a hint for next time */
61932 return (mm->free_area_cache = addr);
61933
61934 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
61935 mm->cached_hole_size = vma->vm_start - addr;
61936
61937 /* try just below the current vma->vm_start */
61938 - addr = vma->vm_start-len;
61939 - } while (len < vma->vm_start);
61940 + addr = skip_heap_stack_gap(vma, len);
61941 + } while (!IS_ERR_VALUE(addr));
61942
61943 bottomup:
61944 /*
61945 @@ -1515,13 +1725,21 @@ bottomup:
61946 * can happen with large stack limits and large mmap()
61947 * allocations.
61948 */
61949 + mm->mmap_base = TASK_UNMAPPED_BASE;
61950 +
61951 +#ifdef CONFIG_PAX_RANDMMAP
61952 + if (mm->pax_flags & MF_PAX_RANDMMAP)
61953 + mm->mmap_base += mm->delta_mmap;
61954 +#endif
61955 +
61956 + mm->free_area_cache = mm->mmap_base;
61957 mm->cached_hole_size = ~0UL;
61958 - mm->free_area_cache = TASK_UNMAPPED_BASE;
61959 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
61960 /*
61961 * Restore the topdown base:
61962 */
61963 - mm->free_area_cache = mm->mmap_base;
61964 + mm->mmap_base = base;
61965 + mm->free_area_cache = base;
61966 mm->cached_hole_size = ~0UL;
61967
61968 return addr;
61969 @@ -1530,6 +1748,12 @@ bottomup:
61970
61971 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
61972 {
61973 +
61974 +#ifdef CONFIG_PAX_SEGMEXEC
61975 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
61976 + return;
61977 +#endif
61978 +
61979 /*
61980 * Is this a new hole at the highest possible address?
61981 */
61982 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
61983 mm->free_area_cache = addr;
61984
61985 /* dont allow allocations above current base */
61986 - if (mm->free_area_cache > mm->mmap_base)
61987 + if (mm->free_area_cache > mm->mmap_base) {
61988 mm->free_area_cache = mm->mmap_base;
61989 + mm->cached_hole_size = ~0UL;
61990 + }
61991 }
61992
61993 unsigned long
61994 @@ -1646,6 +1872,28 @@ out:
61995 return prev ? prev->vm_next : vma;
61996 }
61997
61998 +#ifdef CONFIG_PAX_SEGMEXEC
61999 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62000 +{
62001 + struct vm_area_struct *vma_m;
62002 +
62003 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62004 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62005 + BUG_ON(vma->vm_mirror);
62006 + return NULL;
62007 + }
62008 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62009 + vma_m = vma->vm_mirror;
62010 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62011 + BUG_ON(vma->vm_file != vma_m->vm_file);
62012 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62013 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62014 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62015 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62016 + return vma_m;
62017 +}
62018 +#endif
62019 +
62020 /*
62021 * Verify that the stack growth is acceptable and
62022 * update accounting. This is shared with both the
62023 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62024 return -ENOMEM;
62025
62026 /* Stack limit test */
62027 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62028 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62029 return -ENOMEM;
62030
62031 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62032 locked = mm->locked_vm + grow;
62033 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62034 limit >>= PAGE_SHIFT;
62035 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62036 if (locked > limit && !capable(CAP_IPC_LOCK))
62037 return -ENOMEM;
62038 }
62039 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62040 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62041 * vma is the last one with address > vma->vm_end. Have to extend vma.
62042 */
62043 +#ifndef CONFIG_IA64
62044 +static
62045 +#endif
62046 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62047 {
62048 int error;
62049 + bool locknext;
62050
62051 if (!(vma->vm_flags & VM_GROWSUP))
62052 return -EFAULT;
62053
62054 + /* Also guard against wrapping around to address 0. */
62055 + if (address < PAGE_ALIGN(address+1))
62056 + address = PAGE_ALIGN(address+1);
62057 + else
62058 + return -ENOMEM;
62059 +
62060 /*
62061 * We must make sure the anon_vma is allocated
62062 * so that the anon_vma locking is not a noop.
62063 */
62064 if (unlikely(anon_vma_prepare(vma)))
62065 return -ENOMEM;
62066 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62067 + if (locknext && anon_vma_prepare(vma->vm_next))
62068 + return -ENOMEM;
62069 vma_lock_anon_vma(vma);
62070 + if (locknext)
62071 + vma_lock_anon_vma(vma->vm_next);
62072
62073 /*
62074 * vma->vm_start/vm_end cannot change under us because the caller
62075 * is required to hold the mmap_sem in read mode. We need the
62076 - * anon_vma lock to serialize against concurrent expand_stacks.
62077 - * Also guard against wrapping around to address 0.
62078 + * anon_vma locks to serialize against concurrent expand_stacks
62079 + * and expand_upwards.
62080 */
62081 - if (address < PAGE_ALIGN(address+4))
62082 - address = PAGE_ALIGN(address+4);
62083 - else {
62084 - vma_unlock_anon_vma(vma);
62085 - return -ENOMEM;
62086 - }
62087 error = 0;
62088
62089 /* Somebody else might have raced and expanded it already */
62090 - if (address > vma->vm_end) {
62091 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62092 + error = -ENOMEM;
62093 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62094 unsigned long size, grow;
62095
62096 size = address - vma->vm_start;
62097 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62098 }
62099 }
62100 }
62101 + if (locknext)
62102 + vma_unlock_anon_vma(vma->vm_next);
62103 vma_unlock_anon_vma(vma);
62104 khugepaged_enter_vma_merge(vma);
62105 return error;
62106 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62107 unsigned long address)
62108 {
62109 int error;
62110 + bool lockprev = false;
62111 + struct vm_area_struct *prev;
62112
62113 /*
62114 * We must make sure the anon_vma is allocated
62115 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62116 if (error)
62117 return error;
62118
62119 + prev = vma->vm_prev;
62120 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62121 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62122 +#endif
62123 + if (lockprev && anon_vma_prepare(prev))
62124 + return -ENOMEM;
62125 + if (lockprev)
62126 + vma_lock_anon_vma(prev);
62127 +
62128 vma_lock_anon_vma(vma);
62129
62130 /*
62131 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62132 */
62133
62134 /* Somebody else might have raced and expanded it already */
62135 - if (address < vma->vm_start) {
62136 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62137 + error = -ENOMEM;
62138 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62139 unsigned long size, grow;
62140
62141 +#ifdef CONFIG_PAX_SEGMEXEC
62142 + struct vm_area_struct *vma_m;
62143 +
62144 + vma_m = pax_find_mirror_vma(vma);
62145 +#endif
62146 +
62147 size = vma->vm_end - address;
62148 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62149
62150 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62151 if (!error) {
62152 vma->vm_start = address;
62153 vma->vm_pgoff -= grow;
62154 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62155 +
62156 +#ifdef CONFIG_PAX_SEGMEXEC
62157 + if (vma_m) {
62158 + vma_m->vm_start -= grow << PAGE_SHIFT;
62159 + vma_m->vm_pgoff -= grow;
62160 + }
62161 +#endif
62162 +
62163 perf_event_mmap(vma);
62164 }
62165 }
62166 }
62167 vma_unlock_anon_vma(vma);
62168 + if (lockprev)
62169 + vma_unlock_anon_vma(prev);
62170 khugepaged_enter_vma_merge(vma);
62171 return error;
62172 }
62173 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62174 do {
62175 long nrpages = vma_pages(vma);
62176
62177 +#ifdef CONFIG_PAX_SEGMEXEC
62178 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62179 + vma = remove_vma(vma);
62180 + continue;
62181 + }
62182 +#endif
62183 +
62184 mm->total_vm -= nrpages;
62185 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62186 vma = remove_vma(vma);
62187 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62188 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62189 vma->vm_prev = NULL;
62190 do {
62191 +
62192 +#ifdef CONFIG_PAX_SEGMEXEC
62193 + if (vma->vm_mirror) {
62194 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62195 + vma->vm_mirror->vm_mirror = NULL;
62196 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62197 + vma->vm_mirror = NULL;
62198 + }
62199 +#endif
62200 +
62201 rb_erase(&vma->vm_rb, &mm->mm_rb);
62202 mm->map_count--;
62203 tail_vma = vma;
62204 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62205 struct vm_area_struct *new;
62206 int err = -ENOMEM;
62207
62208 +#ifdef CONFIG_PAX_SEGMEXEC
62209 + struct vm_area_struct *vma_m, *new_m = NULL;
62210 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62211 +#endif
62212 +
62213 if (is_vm_hugetlb_page(vma) && (addr &
62214 ~(huge_page_mask(hstate_vma(vma)))))
62215 return -EINVAL;
62216
62217 +#ifdef CONFIG_PAX_SEGMEXEC
62218 + vma_m = pax_find_mirror_vma(vma);
62219 +#endif
62220 +
62221 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62222 if (!new)
62223 goto out_err;
62224
62225 +#ifdef CONFIG_PAX_SEGMEXEC
62226 + if (vma_m) {
62227 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62228 + if (!new_m) {
62229 + kmem_cache_free(vm_area_cachep, new);
62230 + goto out_err;
62231 + }
62232 + }
62233 +#endif
62234 +
62235 /* most fields are the same, copy all, and then fixup */
62236 *new = *vma;
62237
62238 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62239 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62240 }
62241
62242 +#ifdef CONFIG_PAX_SEGMEXEC
62243 + if (vma_m) {
62244 + *new_m = *vma_m;
62245 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62246 + new_m->vm_mirror = new;
62247 + new->vm_mirror = new_m;
62248 +
62249 + if (new_below)
62250 + new_m->vm_end = addr_m;
62251 + else {
62252 + new_m->vm_start = addr_m;
62253 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62254 + }
62255 + }
62256 +#endif
62257 +
62258 pol = mpol_dup(vma_policy(vma));
62259 if (IS_ERR(pol)) {
62260 err = PTR_ERR(pol);
62261 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62262 else
62263 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62264
62265 +#ifdef CONFIG_PAX_SEGMEXEC
62266 + if (!err && vma_m) {
62267 + if (anon_vma_clone(new_m, vma_m))
62268 + goto out_free_mpol;
62269 +
62270 + mpol_get(pol);
62271 + vma_set_policy(new_m, pol);
62272 +
62273 + if (new_m->vm_file) {
62274 + get_file(new_m->vm_file);
62275 + if (vma_m->vm_flags & VM_EXECUTABLE)
62276 + added_exe_file_vma(mm);
62277 + }
62278 +
62279 + if (new_m->vm_ops && new_m->vm_ops->open)
62280 + new_m->vm_ops->open(new_m);
62281 +
62282 + if (new_below)
62283 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62284 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62285 + else
62286 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62287 +
62288 + if (err) {
62289 + if (new_m->vm_ops && new_m->vm_ops->close)
62290 + new_m->vm_ops->close(new_m);
62291 + if (new_m->vm_file) {
62292 + if (vma_m->vm_flags & VM_EXECUTABLE)
62293 + removed_exe_file_vma(mm);
62294 + fput(new_m->vm_file);
62295 + }
62296 + mpol_put(pol);
62297 + }
62298 + }
62299 +#endif
62300 +
62301 /* Success. */
62302 if (!err)
62303 return 0;
62304 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62305 removed_exe_file_vma(mm);
62306 fput(new->vm_file);
62307 }
62308 - unlink_anon_vmas(new);
62309 out_free_mpol:
62310 mpol_put(pol);
62311 out_free_vma:
62312 +
62313 +#ifdef CONFIG_PAX_SEGMEXEC
62314 + if (new_m) {
62315 + unlink_anon_vmas(new_m);
62316 + kmem_cache_free(vm_area_cachep, new_m);
62317 + }
62318 +#endif
62319 +
62320 + unlink_anon_vmas(new);
62321 kmem_cache_free(vm_area_cachep, new);
62322 out_err:
62323 return err;
62324 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62325 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62326 unsigned long addr, int new_below)
62327 {
62328 +
62329 +#ifdef CONFIG_PAX_SEGMEXEC
62330 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62331 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62332 + if (mm->map_count >= sysctl_max_map_count-1)
62333 + return -ENOMEM;
62334 + } else
62335 +#endif
62336 +
62337 if (mm->map_count >= sysctl_max_map_count)
62338 return -ENOMEM;
62339
62340 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62341 * work. This now handles partial unmappings.
62342 * Jeremy Fitzhardinge <jeremy@goop.org>
62343 */
62344 +#ifdef CONFIG_PAX_SEGMEXEC
62345 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62346 {
62347 + int ret = __do_munmap(mm, start, len);
62348 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62349 + return ret;
62350 +
62351 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62352 +}
62353 +
62354 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62355 +#else
62356 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62357 +#endif
62358 +{
62359 unsigned long end;
62360 struct vm_area_struct *vma, *prev, *last;
62361
62362 + /*
62363 + * mm->mmap_sem is required to protect against another thread
62364 + * changing the mappings in case we sleep.
62365 + */
62366 + verify_mm_writelocked(mm);
62367 +
62368 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62369 return -EINVAL;
62370
62371 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62372 /* Fix up all other VM information */
62373 remove_vma_list(mm, vma);
62374
62375 + track_exec_limit(mm, start, end, 0UL);
62376 +
62377 return 0;
62378 }
62379
62380 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62381
62382 profile_munmap(addr);
62383
62384 +#ifdef CONFIG_PAX_SEGMEXEC
62385 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62386 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62387 + return -EINVAL;
62388 +#endif
62389 +
62390 down_write(&mm->mmap_sem);
62391 ret = do_munmap(mm, addr, len);
62392 up_write(&mm->mmap_sem);
62393 return ret;
62394 }
62395
62396 -static inline void verify_mm_writelocked(struct mm_struct *mm)
62397 -{
62398 -#ifdef CONFIG_DEBUG_VM
62399 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62400 - WARN_ON(1);
62401 - up_read(&mm->mmap_sem);
62402 - }
62403 -#endif
62404 -}
62405 -
62406 /*
62407 * this is really a simplified "do_mmap". it only handles
62408 * anonymous maps. eventually we may be able to do some
62409 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62410 struct rb_node ** rb_link, * rb_parent;
62411 pgoff_t pgoff = addr >> PAGE_SHIFT;
62412 int error;
62413 + unsigned long charged;
62414
62415 len = PAGE_ALIGN(len);
62416 if (!len)
62417 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62418
62419 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62420
62421 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62422 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62423 + flags &= ~VM_EXEC;
62424 +
62425 +#ifdef CONFIG_PAX_MPROTECT
62426 + if (mm->pax_flags & MF_PAX_MPROTECT)
62427 + flags &= ~VM_MAYEXEC;
62428 +#endif
62429 +
62430 + }
62431 +#endif
62432 +
62433 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62434 if (error & ~PAGE_MASK)
62435 return error;
62436
62437 + charged = len >> PAGE_SHIFT;
62438 +
62439 /*
62440 * mlock MCL_FUTURE?
62441 */
62442 if (mm->def_flags & VM_LOCKED) {
62443 unsigned long locked, lock_limit;
62444 - locked = len >> PAGE_SHIFT;
62445 + locked = charged;
62446 locked += mm->locked_vm;
62447 lock_limit = rlimit(RLIMIT_MEMLOCK);
62448 lock_limit >>= PAGE_SHIFT;
62449 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62450 /*
62451 * Clear old maps. this also does some error checking for us
62452 */
62453 - munmap_back:
62454 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62455 if (vma && vma->vm_start < addr + len) {
62456 if (do_munmap(mm, addr, len))
62457 return -ENOMEM;
62458 - goto munmap_back;
62459 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62460 + BUG_ON(vma && vma->vm_start < addr + len);
62461 }
62462
62463 /* Check against address space limits *after* clearing old maps... */
62464 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62465 + if (!may_expand_vm(mm, charged))
62466 return -ENOMEM;
62467
62468 if (mm->map_count > sysctl_max_map_count)
62469 return -ENOMEM;
62470
62471 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
62472 + if (security_vm_enough_memory(charged))
62473 return -ENOMEM;
62474
62475 /* Can we just expand an old private anonymous mapping? */
62476 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62477 */
62478 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62479 if (!vma) {
62480 - vm_unacct_memory(len >> PAGE_SHIFT);
62481 + vm_unacct_memory(charged);
62482 return -ENOMEM;
62483 }
62484
62485 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62486 vma_link(mm, vma, prev, rb_link, rb_parent);
62487 out:
62488 perf_event_mmap(vma);
62489 - mm->total_vm += len >> PAGE_SHIFT;
62490 + mm->total_vm += charged;
62491 if (flags & VM_LOCKED) {
62492 if (!mlock_vma_pages_range(vma, addr, addr + len))
62493 - mm->locked_vm += (len >> PAGE_SHIFT);
62494 + mm->locked_vm += charged;
62495 }
62496 + track_exec_limit(mm, addr, addr + len, flags);
62497 return addr;
62498 }
62499
62500 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62501 * Walk the list again, actually closing and freeing it,
62502 * with preemption enabled, without holding any MM locks.
62503 */
62504 - while (vma)
62505 + while (vma) {
62506 + vma->vm_mirror = NULL;
62507 vma = remove_vma(vma);
62508 + }
62509
62510 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62511 }
62512 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62513 struct vm_area_struct * __vma, * prev;
62514 struct rb_node ** rb_link, * rb_parent;
62515
62516 +#ifdef CONFIG_PAX_SEGMEXEC
62517 + struct vm_area_struct *vma_m = NULL;
62518 +#endif
62519 +
62520 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62521 + return -EPERM;
62522 +
62523 /*
62524 * The vm_pgoff of a purely anonymous vma should be irrelevant
62525 * until its first write fault, when page's anon_vma and index
62526 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62527 if ((vma->vm_flags & VM_ACCOUNT) &&
62528 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62529 return -ENOMEM;
62530 +
62531 +#ifdef CONFIG_PAX_SEGMEXEC
62532 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62533 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62534 + if (!vma_m)
62535 + return -ENOMEM;
62536 + }
62537 +#endif
62538 +
62539 vma_link(mm, vma, prev, rb_link, rb_parent);
62540 +
62541 +#ifdef CONFIG_PAX_SEGMEXEC
62542 + if (vma_m)
62543 + BUG_ON(pax_mirror_vma(vma_m, vma));
62544 +#endif
62545 +
62546 return 0;
62547 }
62548
62549 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62550 struct rb_node **rb_link, *rb_parent;
62551 struct mempolicy *pol;
62552
62553 + BUG_ON(vma->vm_mirror);
62554 +
62555 /*
62556 * If anonymous vma has not yet been faulted, update new pgoff
62557 * to match new location, to increase its chance of merging.
62558 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62559 return NULL;
62560 }
62561
62562 +#ifdef CONFIG_PAX_SEGMEXEC
62563 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62564 +{
62565 + struct vm_area_struct *prev_m;
62566 + struct rb_node **rb_link_m, *rb_parent_m;
62567 + struct mempolicy *pol_m;
62568 +
62569 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62570 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62571 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62572 + *vma_m = *vma;
62573 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62574 + if (anon_vma_clone(vma_m, vma))
62575 + return -ENOMEM;
62576 + pol_m = vma_policy(vma_m);
62577 + mpol_get(pol_m);
62578 + vma_set_policy(vma_m, pol_m);
62579 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62580 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62581 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62582 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62583 + if (vma_m->vm_file)
62584 + get_file(vma_m->vm_file);
62585 + if (vma_m->vm_ops && vma_m->vm_ops->open)
62586 + vma_m->vm_ops->open(vma_m);
62587 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62588 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62589 + vma_m->vm_mirror = vma;
62590 + vma->vm_mirror = vma_m;
62591 + return 0;
62592 +}
62593 +#endif
62594 +
62595 /*
62596 * Return true if the calling process may expand its vm space by the passed
62597 * number of pages
62598 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62599 unsigned long lim;
62600
62601 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62602 -
62603 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62604 if (cur + npages > lim)
62605 return 0;
62606 return 1;
62607 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62608 vma->vm_start = addr;
62609 vma->vm_end = addr + len;
62610
62611 +#ifdef CONFIG_PAX_MPROTECT
62612 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62613 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62614 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62615 + return -EPERM;
62616 + if (!(vm_flags & VM_EXEC))
62617 + vm_flags &= ~VM_MAYEXEC;
62618 +#else
62619 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62620 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62621 +#endif
62622 + else
62623 + vm_flags &= ~VM_MAYWRITE;
62624 + }
62625 +#endif
62626 +
62627 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62628 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62629
62630 diff -urNp linux-3.0.3/mm/mprotect.c linux-3.0.3/mm/mprotect.c
62631 --- linux-3.0.3/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62632 +++ linux-3.0.3/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62633 @@ -23,10 +23,16 @@
62634 #include <linux/mmu_notifier.h>
62635 #include <linux/migrate.h>
62636 #include <linux/perf_event.h>
62637 +
62638 +#ifdef CONFIG_PAX_MPROTECT
62639 +#include <linux/elf.h>
62640 +#endif
62641 +
62642 #include <asm/uaccess.h>
62643 #include <asm/pgtable.h>
62644 #include <asm/cacheflush.h>
62645 #include <asm/tlbflush.h>
62646 +#include <asm/mmu_context.h>
62647
62648 #ifndef pgprot_modify
62649 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62650 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
62651 flush_tlb_range(vma, start, end);
62652 }
62653
62654 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62655 +/* called while holding the mmap semaphor for writing except stack expansion */
62656 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
62657 +{
62658 + unsigned long oldlimit, newlimit = 0UL;
62659 +
62660 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
62661 + return;
62662 +
62663 + spin_lock(&mm->page_table_lock);
62664 + oldlimit = mm->context.user_cs_limit;
62665 + if ((prot & VM_EXEC) && oldlimit < end)
62666 + /* USER_CS limit moved up */
62667 + newlimit = end;
62668 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
62669 + /* USER_CS limit moved down */
62670 + newlimit = start;
62671 +
62672 + if (newlimit) {
62673 + mm->context.user_cs_limit = newlimit;
62674 +
62675 +#ifdef CONFIG_SMP
62676 + wmb();
62677 + cpus_clear(mm->context.cpu_user_cs_mask);
62678 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
62679 +#endif
62680 +
62681 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
62682 + }
62683 + spin_unlock(&mm->page_table_lock);
62684 + if (newlimit == end) {
62685 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
62686 +
62687 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
62688 + if (is_vm_hugetlb_page(vma))
62689 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
62690 + else
62691 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
62692 + }
62693 +}
62694 +#endif
62695 +
62696 int
62697 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
62698 unsigned long start, unsigned long end, unsigned long newflags)
62699 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
62700 int error;
62701 int dirty_accountable = 0;
62702
62703 +#ifdef CONFIG_PAX_SEGMEXEC
62704 + struct vm_area_struct *vma_m = NULL;
62705 + unsigned long start_m, end_m;
62706 +
62707 + start_m = start + SEGMEXEC_TASK_SIZE;
62708 + end_m = end + SEGMEXEC_TASK_SIZE;
62709 +#endif
62710 +
62711 if (newflags == oldflags) {
62712 *pprev = vma;
62713 return 0;
62714 }
62715
62716 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
62717 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
62718 +
62719 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
62720 + return -ENOMEM;
62721 +
62722 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
62723 + return -ENOMEM;
62724 + }
62725 +
62726 /*
62727 * If we make a private mapping writable we increase our commit;
62728 * but (without finer accounting) cannot reduce our commit if we
62729 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
62730 }
62731 }
62732
62733 +#ifdef CONFIG_PAX_SEGMEXEC
62734 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
62735 + if (start != vma->vm_start) {
62736 + error = split_vma(mm, vma, start, 1);
62737 + if (error)
62738 + goto fail;
62739 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
62740 + *pprev = (*pprev)->vm_next;
62741 + }
62742 +
62743 + if (end != vma->vm_end) {
62744 + error = split_vma(mm, vma, end, 0);
62745 + if (error)
62746 + goto fail;
62747 + }
62748 +
62749 + if (pax_find_mirror_vma(vma)) {
62750 + error = __do_munmap(mm, start_m, end_m - start_m);
62751 + if (error)
62752 + goto fail;
62753 + } else {
62754 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62755 + if (!vma_m) {
62756 + error = -ENOMEM;
62757 + goto fail;
62758 + }
62759 + vma->vm_flags = newflags;
62760 + error = pax_mirror_vma(vma_m, vma);
62761 + if (error) {
62762 + vma->vm_flags = oldflags;
62763 + goto fail;
62764 + }
62765 + }
62766 + }
62767 +#endif
62768 +
62769 /*
62770 * First try to merge with previous and/or next vma.
62771 */
62772 @@ -204,9 +306,21 @@ success:
62773 * vm_flags and vm_page_prot are protected by the mmap_sem
62774 * held in write mode.
62775 */
62776 +
62777 +#ifdef CONFIG_PAX_SEGMEXEC
62778 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
62779 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
62780 +#endif
62781 +
62782 vma->vm_flags = newflags;
62783 +
62784 +#ifdef CONFIG_PAX_MPROTECT
62785 + if (mm->binfmt && mm->binfmt->handle_mprotect)
62786 + mm->binfmt->handle_mprotect(vma, newflags);
62787 +#endif
62788 +
62789 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
62790 - vm_get_page_prot(newflags));
62791 + vm_get_page_prot(vma->vm_flags));
62792
62793 if (vma_wants_writenotify(vma)) {
62794 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
62795 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
62796 end = start + len;
62797 if (end <= start)
62798 return -ENOMEM;
62799 +
62800 +#ifdef CONFIG_PAX_SEGMEXEC
62801 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
62802 + if (end > SEGMEXEC_TASK_SIZE)
62803 + return -EINVAL;
62804 + } else
62805 +#endif
62806 +
62807 + if (end > TASK_SIZE)
62808 + return -EINVAL;
62809 +
62810 if (!arch_validate_prot(prot))
62811 return -EINVAL;
62812
62813 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
62814 /*
62815 * Does the application expect PROT_READ to imply PROT_EXEC:
62816 */
62817 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62818 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62819 prot |= PROT_EXEC;
62820
62821 vm_flags = calc_vm_prot_bits(prot);
62822 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
62823 if (start > vma->vm_start)
62824 prev = vma;
62825
62826 +#ifdef CONFIG_PAX_MPROTECT
62827 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
62828 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
62829 +#endif
62830 +
62831 for (nstart = start ; ; ) {
62832 unsigned long newflags;
62833
62834 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
62835
62836 /* newflags >> 4 shift VM_MAY% in place of VM_% */
62837 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
62838 + if (prot & (PROT_WRITE | PROT_EXEC))
62839 + gr_log_rwxmprotect(vma->vm_file);
62840 +
62841 + error = -EACCES;
62842 + goto out;
62843 + }
62844 +
62845 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
62846 error = -EACCES;
62847 goto out;
62848 }
62849 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
62850 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
62851 if (error)
62852 goto out;
62853 +
62854 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
62855 +
62856 nstart = tmp;
62857
62858 if (nstart < prev->vm_end)
62859 diff -urNp linux-3.0.3/mm/mremap.c linux-3.0.3/mm/mremap.c
62860 --- linux-3.0.3/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
62861 +++ linux-3.0.3/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
62862 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
62863 continue;
62864 pte = ptep_clear_flush(vma, old_addr, old_pte);
62865 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
62866 +
62867 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62868 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
62869 + pte = pte_exprotect(pte);
62870 +#endif
62871 +
62872 set_pte_at(mm, new_addr, new_pte, pte);
62873 }
62874
62875 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
62876 if (is_vm_hugetlb_page(vma))
62877 goto Einval;
62878
62879 +#ifdef CONFIG_PAX_SEGMEXEC
62880 + if (pax_find_mirror_vma(vma))
62881 + goto Einval;
62882 +#endif
62883 +
62884 /* We can't remap across vm area boundaries */
62885 if (old_len > vma->vm_end - addr)
62886 goto Efault;
62887 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
62888 unsigned long ret = -EINVAL;
62889 unsigned long charged = 0;
62890 unsigned long map_flags;
62891 + unsigned long pax_task_size = TASK_SIZE;
62892
62893 if (new_addr & ~PAGE_MASK)
62894 goto out;
62895
62896 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
62897 +#ifdef CONFIG_PAX_SEGMEXEC
62898 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
62899 + pax_task_size = SEGMEXEC_TASK_SIZE;
62900 +#endif
62901 +
62902 + pax_task_size -= PAGE_SIZE;
62903 +
62904 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
62905 goto out;
62906
62907 /* Check if the location we're moving into overlaps the
62908 * old location at all, and fail if it does.
62909 */
62910 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
62911 - goto out;
62912 -
62913 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
62914 + if (addr + old_len > new_addr && new_addr + new_len > addr)
62915 goto out;
62916
62917 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
62918 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
62919 struct vm_area_struct *vma;
62920 unsigned long ret = -EINVAL;
62921 unsigned long charged = 0;
62922 + unsigned long pax_task_size = TASK_SIZE;
62923
62924 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
62925 goto out;
62926 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
62927 if (!new_len)
62928 goto out;
62929
62930 +#ifdef CONFIG_PAX_SEGMEXEC
62931 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
62932 + pax_task_size = SEGMEXEC_TASK_SIZE;
62933 +#endif
62934 +
62935 + pax_task_size -= PAGE_SIZE;
62936 +
62937 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
62938 + old_len > pax_task_size || addr > pax_task_size-old_len)
62939 + goto out;
62940 +
62941 if (flags & MREMAP_FIXED) {
62942 if (flags & MREMAP_MAYMOVE)
62943 ret = mremap_to(addr, old_len, new_addr, new_len);
62944 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
62945 addr + new_len);
62946 }
62947 ret = addr;
62948 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
62949 goto out;
62950 }
62951 }
62952 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
62953 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
62954 if (ret)
62955 goto out;
62956 +
62957 + map_flags = vma->vm_flags;
62958 ret = move_vma(vma, addr, old_len, new_len, new_addr);
62959 + if (!(ret & ~PAGE_MASK)) {
62960 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
62961 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
62962 + }
62963 }
62964 out:
62965 if (ret & ~PAGE_MASK)
62966 diff -urNp linux-3.0.3/mm/nobootmem.c linux-3.0.3/mm/nobootmem.c
62967 --- linux-3.0.3/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
62968 +++ linux-3.0.3/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
62969 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
62970 unsigned long __init free_all_memory_core_early(int nodeid)
62971 {
62972 int i;
62973 - u64 start, end;
62974 + u64 start, end, startrange, endrange;
62975 unsigned long count = 0;
62976 - struct range *range = NULL;
62977 + struct range *range = NULL, rangerange = { 0, 0 };
62978 int nr_range;
62979
62980 nr_range = get_free_all_memory_range(&range, nodeid);
62981 + startrange = __pa(range) >> PAGE_SHIFT;
62982 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
62983
62984 for (i = 0; i < nr_range; i++) {
62985 start = range[i].start;
62986 end = range[i].end;
62987 + if (start <= endrange && startrange < end) {
62988 + BUG_ON(rangerange.start | rangerange.end);
62989 + rangerange = range[i];
62990 + continue;
62991 + }
62992 count += end - start;
62993 __free_pages_memory(start, end);
62994 }
62995 + start = rangerange.start;
62996 + end = rangerange.end;
62997 + count += end - start;
62998 + __free_pages_memory(start, end);
62999
63000 return count;
63001 }
63002 diff -urNp linux-3.0.3/mm/nommu.c linux-3.0.3/mm/nommu.c
63003 --- linux-3.0.3/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63004 +++ linux-3.0.3/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63005 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63006 int sysctl_overcommit_ratio = 50; /* default is 50% */
63007 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63008 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63009 -int heap_stack_gap = 0;
63010
63011 atomic_long_t mmap_pages_allocated;
63012
63013 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63014 EXPORT_SYMBOL(find_vma);
63015
63016 /*
63017 - * find a VMA
63018 - * - we don't extend stack VMAs under NOMMU conditions
63019 - */
63020 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63021 -{
63022 - return find_vma(mm, addr);
63023 -}
63024 -
63025 -/*
63026 * expand a stack to a given address
63027 * - not supported under NOMMU conditions
63028 */
63029 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63030
63031 /* most fields are the same, copy all, and then fixup */
63032 *new = *vma;
63033 + INIT_LIST_HEAD(&new->anon_vma_chain);
63034 *region = *vma->vm_region;
63035 new->vm_region = region;
63036
63037 diff -urNp linux-3.0.3/mm/page_alloc.c linux-3.0.3/mm/page_alloc.c
63038 --- linux-3.0.3/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63039 +++ linux-3.0.3/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63040 @@ -340,7 +340,7 @@ out:
63041 * This usage means that zero-order pages may not be compound.
63042 */
63043
63044 -static void free_compound_page(struct page *page)
63045 +void free_compound_page(struct page *page)
63046 {
63047 __free_pages_ok(page, compound_order(page));
63048 }
63049 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63050 int i;
63051 int bad = 0;
63052
63053 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63054 + unsigned long index = 1UL << order;
63055 +#endif
63056 +
63057 trace_mm_page_free_direct(page, order);
63058 kmemcheck_free_shadow(page, order);
63059
63060 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63061 debug_check_no_obj_freed(page_address(page),
63062 PAGE_SIZE << order);
63063 }
63064 +
63065 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63066 + for (; index; --index)
63067 + sanitize_highpage(page + index - 1);
63068 +#endif
63069 +
63070 arch_free_page(page, order);
63071 kernel_map_pages(page, 1 << order, 0);
63072
63073 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63074 arch_alloc_page(page, order);
63075 kernel_map_pages(page, 1 << order, 1);
63076
63077 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63078 if (gfp_flags & __GFP_ZERO)
63079 prep_zero_page(page, order, gfp_flags);
63080 +#endif
63081
63082 if (order && (gfp_flags & __GFP_COMP))
63083 prep_compound_page(page, order);
63084 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63085 int cpu;
63086 struct zone *zone;
63087
63088 + pax_track_stack();
63089 +
63090 for_each_populated_zone(zone) {
63091 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63092 continue;
63093 diff -urNp linux-3.0.3/mm/percpu.c linux-3.0.3/mm/percpu.c
63094 --- linux-3.0.3/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63095 +++ linux-3.0.3/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63096 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63097 static unsigned int pcpu_last_unit_cpu __read_mostly;
63098
63099 /* the address of the first chunk which starts with the kernel static area */
63100 -void *pcpu_base_addr __read_mostly;
63101 +void *pcpu_base_addr __read_only;
63102 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63103
63104 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63105 diff -urNp linux-3.0.3/mm/rmap.c linux-3.0.3/mm/rmap.c
63106 --- linux-3.0.3/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63107 +++ linux-3.0.3/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63108 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63109 struct anon_vma *anon_vma = vma->anon_vma;
63110 struct anon_vma_chain *avc;
63111
63112 +#ifdef CONFIG_PAX_SEGMEXEC
63113 + struct anon_vma_chain *avc_m = NULL;
63114 +#endif
63115 +
63116 might_sleep();
63117 if (unlikely(!anon_vma)) {
63118 struct mm_struct *mm = vma->vm_mm;
63119 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63120 if (!avc)
63121 goto out_enomem;
63122
63123 +#ifdef CONFIG_PAX_SEGMEXEC
63124 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63125 + if (!avc_m)
63126 + goto out_enomem_free_avc;
63127 +#endif
63128 +
63129 anon_vma = find_mergeable_anon_vma(vma);
63130 allocated = NULL;
63131 if (!anon_vma) {
63132 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63133 /* page_table_lock to protect against threads */
63134 spin_lock(&mm->page_table_lock);
63135 if (likely(!vma->anon_vma)) {
63136 +
63137 +#ifdef CONFIG_PAX_SEGMEXEC
63138 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63139 +
63140 + if (vma_m) {
63141 + BUG_ON(vma_m->anon_vma);
63142 + vma_m->anon_vma = anon_vma;
63143 + avc_m->anon_vma = anon_vma;
63144 + avc_m->vma = vma;
63145 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63146 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63147 + avc_m = NULL;
63148 + }
63149 +#endif
63150 +
63151 vma->anon_vma = anon_vma;
63152 avc->anon_vma = anon_vma;
63153 avc->vma = vma;
63154 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63155
63156 if (unlikely(allocated))
63157 put_anon_vma(allocated);
63158 +
63159 +#ifdef CONFIG_PAX_SEGMEXEC
63160 + if (unlikely(avc_m))
63161 + anon_vma_chain_free(avc_m);
63162 +#endif
63163 +
63164 if (unlikely(avc))
63165 anon_vma_chain_free(avc);
63166 }
63167 return 0;
63168
63169 out_enomem_free_avc:
63170 +
63171 +#ifdef CONFIG_PAX_SEGMEXEC
63172 + if (avc_m)
63173 + anon_vma_chain_free(avc_m);
63174 +#endif
63175 +
63176 anon_vma_chain_free(avc);
63177 out_enomem:
63178 return -ENOMEM;
63179 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63180 * Attach the anon_vmas from src to dst.
63181 * Returns 0 on success, -ENOMEM on failure.
63182 */
63183 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63184 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63185 {
63186 struct anon_vma_chain *avc, *pavc;
63187 struct anon_vma *root = NULL;
63188 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63189 * the corresponding VMA in the parent process is attached to.
63190 * Returns 0 on success, non-zero on failure.
63191 */
63192 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63193 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63194 {
63195 struct anon_vma_chain *avc;
63196 struct anon_vma *anon_vma;
63197 diff -urNp linux-3.0.3/mm/shmem.c linux-3.0.3/mm/shmem.c
63198 --- linux-3.0.3/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63199 +++ linux-3.0.3/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63200 @@ -31,7 +31,7 @@
63201 #include <linux/percpu_counter.h>
63202 #include <linux/swap.h>
63203
63204 -static struct vfsmount *shm_mnt;
63205 +struct vfsmount *shm_mnt;
63206
63207 #ifdef CONFIG_SHMEM
63208 /*
63209 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63210 goto unlock;
63211 }
63212 entry = shmem_swp_entry(info, index, NULL);
63213 + if (!entry)
63214 + goto unlock;
63215 if (entry->val) {
63216 /*
63217 * The more uptodate page coming down from a stacked
63218 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63219 struct vm_area_struct pvma;
63220 struct page *page;
63221
63222 + pax_track_stack();
63223 +
63224 spol = mpol_cond_copy(&mpol,
63225 mpol_shared_policy_lookup(&info->policy, idx));
63226
63227 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63228 int err = -ENOMEM;
63229
63230 /* Round up to L1_CACHE_BYTES to resist false sharing */
63231 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63232 - L1_CACHE_BYTES), GFP_KERNEL);
63233 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63234 if (!sbinfo)
63235 return -ENOMEM;
63236
63237 diff -urNp linux-3.0.3/mm/slab.c linux-3.0.3/mm/slab.c
63238 --- linux-3.0.3/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63239 +++ linux-3.0.3/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63240 @@ -151,7 +151,7 @@
63241
63242 /* Legal flag mask for kmem_cache_create(). */
63243 #if DEBUG
63244 -# define CREATE_MASK (SLAB_RED_ZONE | \
63245 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63246 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63247 SLAB_CACHE_DMA | \
63248 SLAB_STORE_USER | \
63249 @@ -159,7 +159,7 @@
63250 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63251 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63252 #else
63253 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63254 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63255 SLAB_CACHE_DMA | \
63256 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63257 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63258 @@ -288,7 +288,7 @@ struct kmem_list3 {
63259 * Need this for bootstrapping a per node allocator.
63260 */
63261 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63262 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63263 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63264 #define CACHE_CACHE 0
63265 #define SIZE_AC MAX_NUMNODES
63266 #define SIZE_L3 (2 * MAX_NUMNODES)
63267 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63268 if ((x)->max_freeable < i) \
63269 (x)->max_freeable = i; \
63270 } while (0)
63271 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63272 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63273 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63274 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63275 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63276 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63277 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63278 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63279 #else
63280 #define STATS_INC_ACTIVE(x) do { } while (0)
63281 #define STATS_DEC_ACTIVE(x) do { } while (0)
63282 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63283 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63284 */
63285 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63286 - const struct slab *slab, void *obj)
63287 + const struct slab *slab, const void *obj)
63288 {
63289 u32 offset = (obj - slab->s_mem);
63290 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63291 @@ -564,7 +564,7 @@ struct cache_names {
63292 static struct cache_names __initdata cache_names[] = {
63293 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63294 #include <linux/kmalloc_sizes.h>
63295 - {NULL,}
63296 + {NULL}
63297 #undef CACHE
63298 };
63299
63300 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63301 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63302 sizes[INDEX_AC].cs_size,
63303 ARCH_KMALLOC_MINALIGN,
63304 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63305 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63306 NULL);
63307
63308 if (INDEX_AC != INDEX_L3) {
63309 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63310 kmem_cache_create(names[INDEX_L3].name,
63311 sizes[INDEX_L3].cs_size,
63312 ARCH_KMALLOC_MINALIGN,
63313 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63314 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63315 NULL);
63316 }
63317
63318 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63319 sizes->cs_cachep = kmem_cache_create(names->name,
63320 sizes->cs_size,
63321 ARCH_KMALLOC_MINALIGN,
63322 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63323 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63324 NULL);
63325 }
63326 #ifdef CONFIG_ZONE_DMA
63327 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63328 }
63329 /* cpu stats */
63330 {
63331 - unsigned long allochit = atomic_read(&cachep->allochit);
63332 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63333 - unsigned long freehit = atomic_read(&cachep->freehit);
63334 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63335 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63336 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63337 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63338 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63339
63340 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63341 allochit, allocmiss, freehit, freemiss);
63342 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
63343
63344 static int __init slab_proc_init(void)
63345 {
63346 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63347 + mode_t gr_mode = S_IRUGO;
63348 +
63349 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63350 + gr_mode = S_IRUSR;
63351 +#endif
63352 +
63353 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63354 #ifdef CONFIG_DEBUG_SLAB_LEAK
63355 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63356 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63357 #endif
63358 return 0;
63359 }
63360 module_init(slab_proc_init);
63361 #endif
63362
63363 +void check_object_size(const void *ptr, unsigned long n, bool to)
63364 +{
63365 +
63366 +#ifdef CONFIG_PAX_USERCOPY
63367 + struct page *page;
63368 + struct kmem_cache *cachep = NULL;
63369 + struct slab *slabp;
63370 + unsigned int objnr;
63371 + unsigned long offset;
63372 +
63373 + if (!n)
63374 + return;
63375 +
63376 + if (ZERO_OR_NULL_PTR(ptr))
63377 + goto report;
63378 +
63379 + if (!virt_addr_valid(ptr))
63380 + return;
63381 +
63382 + page = virt_to_head_page(ptr);
63383 +
63384 + if (!PageSlab(page)) {
63385 + if (object_is_on_stack(ptr, n) == -1)
63386 + goto report;
63387 + return;
63388 + }
63389 +
63390 + cachep = page_get_cache(page);
63391 + if (!(cachep->flags & SLAB_USERCOPY))
63392 + goto report;
63393 +
63394 + slabp = page_get_slab(page);
63395 + objnr = obj_to_index(cachep, slabp, ptr);
63396 + BUG_ON(objnr >= cachep->num);
63397 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63398 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63399 + return;
63400 +
63401 +report:
63402 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63403 +#endif
63404 +
63405 +}
63406 +EXPORT_SYMBOL(check_object_size);
63407 +
63408 /**
63409 * ksize - get the actual amount of memory allocated for a given object
63410 * @objp: Pointer to the object
63411 diff -urNp linux-3.0.3/mm/slob.c linux-3.0.3/mm/slob.c
63412 --- linux-3.0.3/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63413 +++ linux-3.0.3/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63414 @@ -29,7 +29,7 @@
63415 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63416 * alloc_pages() directly, allocating compound pages so the page order
63417 * does not have to be separately tracked, and also stores the exact
63418 - * allocation size in page->private so that it can be used to accurately
63419 + * allocation size in slob_page->size so that it can be used to accurately
63420 * provide ksize(). These objects are detected in kfree() because slob_page()
63421 * is false for them.
63422 *
63423 @@ -58,6 +58,7 @@
63424 */
63425
63426 #include <linux/kernel.h>
63427 +#include <linux/sched.h>
63428 #include <linux/slab.h>
63429 #include <linux/mm.h>
63430 #include <linux/swap.h> /* struct reclaim_state */
63431 @@ -102,7 +103,8 @@ struct slob_page {
63432 unsigned long flags; /* mandatory */
63433 atomic_t _count; /* mandatory */
63434 slobidx_t units; /* free units left in page */
63435 - unsigned long pad[2];
63436 + unsigned long pad[1];
63437 + unsigned long size; /* size when >=PAGE_SIZE */
63438 slob_t *free; /* first free slob_t in page */
63439 struct list_head list; /* linked list of free pages */
63440 };
63441 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63442 */
63443 static inline int is_slob_page(struct slob_page *sp)
63444 {
63445 - return PageSlab((struct page *)sp);
63446 + return PageSlab((struct page *)sp) && !sp->size;
63447 }
63448
63449 static inline void set_slob_page(struct slob_page *sp)
63450 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63451
63452 static inline struct slob_page *slob_page(const void *addr)
63453 {
63454 - return (struct slob_page *)virt_to_page(addr);
63455 + return (struct slob_page *)virt_to_head_page(addr);
63456 }
63457
63458 /*
63459 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63460 /*
63461 * Return the size of a slob block.
63462 */
63463 -static slobidx_t slob_units(slob_t *s)
63464 +static slobidx_t slob_units(const slob_t *s)
63465 {
63466 if (s->units > 0)
63467 return s->units;
63468 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63469 /*
63470 * Return the next free slob block pointer after this one.
63471 */
63472 -static slob_t *slob_next(slob_t *s)
63473 +static slob_t *slob_next(const slob_t *s)
63474 {
63475 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63476 slobidx_t next;
63477 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63478 /*
63479 * Returns true if s is the last free block in its page.
63480 */
63481 -static int slob_last(slob_t *s)
63482 +static int slob_last(const slob_t *s)
63483 {
63484 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63485 }
63486 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63487 if (!page)
63488 return NULL;
63489
63490 + set_slob_page(page);
63491 return page_address(page);
63492 }
63493
63494 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63495 if (!b)
63496 return NULL;
63497 sp = slob_page(b);
63498 - set_slob_page(sp);
63499
63500 spin_lock_irqsave(&slob_lock, flags);
63501 sp->units = SLOB_UNITS(PAGE_SIZE);
63502 sp->free = b;
63503 + sp->size = 0;
63504 INIT_LIST_HEAD(&sp->list);
63505 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63506 set_slob_page_free(sp, slob_list);
63507 @@ -476,10 +479,9 @@ out:
63508 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63509 */
63510
63511 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63512 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63513 {
63514 - unsigned int *m;
63515 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63516 + slob_t *m;
63517 void *ret;
63518
63519 lockdep_trace_alloc(gfp);
63520 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63521
63522 if (!m)
63523 return NULL;
63524 - *m = size;
63525 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63526 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63527 + m[0].units = size;
63528 + m[1].units = align;
63529 ret = (void *)m + align;
63530
63531 trace_kmalloc_node(_RET_IP_, ret,
63532 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63533 gfp |= __GFP_COMP;
63534 ret = slob_new_pages(gfp, order, node);
63535 if (ret) {
63536 - struct page *page;
63537 - page = virt_to_page(ret);
63538 - page->private = size;
63539 + struct slob_page *sp;
63540 + sp = slob_page(ret);
63541 + sp->size = size;
63542 }
63543
63544 trace_kmalloc_node(_RET_IP_, ret,
63545 size, PAGE_SIZE << order, gfp, node);
63546 }
63547
63548 - kmemleak_alloc(ret, size, 1, gfp);
63549 + return ret;
63550 +}
63551 +
63552 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63553 +{
63554 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63555 + void *ret = __kmalloc_node_align(size, gfp, node, align);
63556 +
63557 + if (!ZERO_OR_NULL_PTR(ret))
63558 + kmemleak_alloc(ret, size, 1, gfp);
63559 return ret;
63560 }
63561 EXPORT_SYMBOL(__kmalloc_node);
63562 @@ -531,13 +545,88 @@ void kfree(const void *block)
63563 sp = slob_page(block);
63564 if (is_slob_page(sp)) {
63565 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63566 - unsigned int *m = (unsigned int *)(block - align);
63567 - slob_free(m, *m + align);
63568 - } else
63569 + slob_t *m = (slob_t *)(block - align);
63570 + slob_free(m, m[0].units + align);
63571 + } else {
63572 + clear_slob_page(sp);
63573 + free_slob_page(sp);
63574 + sp->size = 0;
63575 put_page(&sp->page);
63576 + }
63577 }
63578 EXPORT_SYMBOL(kfree);
63579
63580 +void check_object_size(const void *ptr, unsigned long n, bool to)
63581 +{
63582 +
63583 +#ifdef CONFIG_PAX_USERCOPY
63584 + struct slob_page *sp;
63585 + const slob_t *free;
63586 + const void *base;
63587 + unsigned long flags;
63588 +
63589 + if (!n)
63590 + return;
63591 +
63592 + if (ZERO_OR_NULL_PTR(ptr))
63593 + goto report;
63594 +
63595 + if (!virt_addr_valid(ptr))
63596 + return;
63597 +
63598 + sp = slob_page(ptr);
63599 + if (!PageSlab((struct page*)sp)) {
63600 + if (object_is_on_stack(ptr, n) == -1)
63601 + goto report;
63602 + return;
63603 + }
63604 +
63605 + if (sp->size) {
63606 + base = page_address(&sp->page);
63607 + if (base <= ptr && n <= sp->size - (ptr - base))
63608 + return;
63609 + goto report;
63610 + }
63611 +
63612 + /* some tricky double walking to find the chunk */
63613 + spin_lock_irqsave(&slob_lock, flags);
63614 + base = (void *)((unsigned long)ptr & PAGE_MASK);
63615 + free = sp->free;
63616 +
63617 + while (!slob_last(free) && (void *)free <= ptr) {
63618 + base = free + slob_units(free);
63619 + free = slob_next(free);
63620 + }
63621 +
63622 + while (base < (void *)free) {
63623 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63624 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
63625 + int offset;
63626 +
63627 + if (ptr < base + align)
63628 + break;
63629 +
63630 + offset = ptr - base - align;
63631 + if (offset >= m) {
63632 + base += size;
63633 + continue;
63634 + }
63635 +
63636 + if (n > m - offset)
63637 + break;
63638 +
63639 + spin_unlock_irqrestore(&slob_lock, flags);
63640 + return;
63641 + }
63642 +
63643 + spin_unlock_irqrestore(&slob_lock, flags);
63644 +report:
63645 + pax_report_usercopy(ptr, n, to, NULL);
63646 +#endif
63647 +
63648 +}
63649 +EXPORT_SYMBOL(check_object_size);
63650 +
63651 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63652 size_t ksize(const void *block)
63653 {
63654 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
63655 sp = slob_page(block);
63656 if (is_slob_page(sp)) {
63657 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63658 - unsigned int *m = (unsigned int *)(block - align);
63659 - return SLOB_UNITS(*m) * SLOB_UNIT;
63660 + slob_t *m = (slob_t *)(block - align);
63661 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
63662 } else
63663 - return sp->page.private;
63664 + return sp->size;
63665 }
63666 EXPORT_SYMBOL(ksize);
63667
63668 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
63669 {
63670 struct kmem_cache *c;
63671
63672 +#ifdef CONFIG_PAX_USERCOPY
63673 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
63674 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
63675 +#else
63676 c = slob_alloc(sizeof(struct kmem_cache),
63677 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
63678 +#endif
63679
63680 if (c) {
63681 c->name = name;
63682 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
63683 {
63684 void *b;
63685
63686 +#ifdef CONFIG_PAX_USERCOPY
63687 + b = __kmalloc_node_align(c->size, flags, node, c->align);
63688 +#else
63689 if (c->size < PAGE_SIZE) {
63690 b = slob_alloc(c->size, flags, c->align, node);
63691 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63692 SLOB_UNITS(c->size) * SLOB_UNIT,
63693 flags, node);
63694 } else {
63695 + struct slob_page *sp;
63696 +
63697 b = slob_new_pages(flags, get_order(c->size), node);
63698 + sp = slob_page(b);
63699 + sp->size = c->size;
63700 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63701 PAGE_SIZE << get_order(c->size),
63702 flags, node);
63703 }
63704 +#endif
63705
63706 if (c->ctor)
63707 c->ctor(b);
63708 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
63709
63710 static void __kmem_cache_free(void *b, int size)
63711 {
63712 - if (size < PAGE_SIZE)
63713 + struct slob_page *sp = slob_page(b);
63714 +
63715 + if (is_slob_page(sp))
63716 slob_free(b, size);
63717 - else
63718 + else {
63719 + clear_slob_page(sp);
63720 + free_slob_page(sp);
63721 + sp->size = 0;
63722 slob_free_pages(b, get_order(size));
63723 + }
63724 }
63725
63726 static void kmem_rcu_free(struct rcu_head *head)
63727 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
63728
63729 void kmem_cache_free(struct kmem_cache *c, void *b)
63730 {
63731 + int size = c->size;
63732 +
63733 +#ifdef CONFIG_PAX_USERCOPY
63734 + if (size + c->align < PAGE_SIZE) {
63735 + size += c->align;
63736 + b -= c->align;
63737 + }
63738 +#endif
63739 +
63740 kmemleak_free_recursive(b, c->flags);
63741 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
63742 struct slob_rcu *slob_rcu;
63743 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
63744 - slob_rcu->size = c->size;
63745 + slob_rcu = b + (size - sizeof(struct slob_rcu));
63746 + slob_rcu->size = size;
63747 call_rcu(&slob_rcu->head, kmem_rcu_free);
63748 } else {
63749 - __kmem_cache_free(b, c->size);
63750 + __kmem_cache_free(b, size);
63751 }
63752
63753 +#ifdef CONFIG_PAX_USERCOPY
63754 + trace_kfree(_RET_IP_, b);
63755 +#else
63756 trace_kmem_cache_free(_RET_IP_, b);
63757 +#endif
63758 +
63759 }
63760 EXPORT_SYMBOL(kmem_cache_free);
63761
63762 diff -urNp linux-3.0.3/mm/slub.c linux-3.0.3/mm/slub.c
63763 --- linux-3.0.3/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
63764 +++ linux-3.0.3/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
63765 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
63766 if (!t->addr)
63767 return;
63768
63769 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
63770 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
63771 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
63772 }
63773
63774 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
63775
63776 page = virt_to_head_page(x);
63777
63778 + BUG_ON(!PageSlab(page));
63779 +
63780 slab_free(s, page, x, _RET_IP_);
63781
63782 trace_kmem_cache_free(_RET_IP_, x);
63783 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
63784 * Merge control. If this is set then no merging of slab caches will occur.
63785 * (Could be removed. This was introduced to pacify the merge skeptics.)
63786 */
63787 -static int slub_nomerge;
63788 +static int slub_nomerge = 1;
63789
63790 /*
63791 * Calculate the order of allocation given an slab object size.
63792 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
63793 * list to avoid pounding the page allocator excessively.
63794 */
63795 set_min_partial(s, ilog2(s->size));
63796 - s->refcount = 1;
63797 + atomic_set(&s->refcount, 1);
63798 #ifdef CONFIG_NUMA
63799 s->remote_node_defrag_ratio = 1000;
63800 #endif
63801 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
63802 void kmem_cache_destroy(struct kmem_cache *s)
63803 {
63804 down_write(&slub_lock);
63805 - s->refcount--;
63806 - if (!s->refcount) {
63807 + if (atomic_dec_and_test(&s->refcount)) {
63808 list_del(&s->list);
63809 if (kmem_cache_close(s)) {
63810 printk(KERN_ERR "SLUB %s: %s called for cache that "
63811 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
63812 EXPORT_SYMBOL(__kmalloc_node);
63813 #endif
63814
63815 +void check_object_size(const void *ptr, unsigned long n, bool to)
63816 +{
63817 +
63818 +#ifdef CONFIG_PAX_USERCOPY
63819 + struct page *page;
63820 + struct kmem_cache *s = NULL;
63821 + unsigned long offset;
63822 +
63823 + if (!n)
63824 + return;
63825 +
63826 + if (ZERO_OR_NULL_PTR(ptr))
63827 + goto report;
63828 +
63829 + if (!virt_addr_valid(ptr))
63830 + return;
63831 +
63832 + page = virt_to_head_page(ptr);
63833 +
63834 + if (!PageSlab(page)) {
63835 + if (object_is_on_stack(ptr, n) == -1)
63836 + goto report;
63837 + return;
63838 + }
63839 +
63840 + s = page->slab;
63841 + if (!(s->flags & SLAB_USERCOPY))
63842 + goto report;
63843 +
63844 + offset = (ptr - page_address(page)) % s->size;
63845 + if (offset <= s->objsize && n <= s->objsize - offset)
63846 + return;
63847 +
63848 +report:
63849 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
63850 +#endif
63851 +
63852 +}
63853 +EXPORT_SYMBOL(check_object_size);
63854 +
63855 size_t ksize(const void *object)
63856 {
63857 struct page *page;
63858 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
63859 int node;
63860
63861 list_add(&s->list, &slab_caches);
63862 - s->refcount = -1;
63863 + atomic_set(&s->refcount, -1);
63864
63865 for_each_node_state(node, N_NORMAL_MEMORY) {
63866 struct kmem_cache_node *n = get_node(s, node);
63867 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
63868
63869 /* Caches that are not of the two-to-the-power-of size */
63870 if (KMALLOC_MIN_SIZE <= 32) {
63871 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
63872 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
63873 caches++;
63874 }
63875
63876 if (KMALLOC_MIN_SIZE <= 64) {
63877 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
63878 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
63879 caches++;
63880 }
63881
63882 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
63883 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
63884 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
63885 caches++;
63886 }
63887
63888 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
63889 /*
63890 * We may have set a slab to be unmergeable during bootstrap.
63891 */
63892 - if (s->refcount < 0)
63893 + if (atomic_read(&s->refcount) < 0)
63894 return 1;
63895
63896 return 0;
63897 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
63898 down_write(&slub_lock);
63899 s = find_mergeable(size, align, flags, name, ctor);
63900 if (s) {
63901 - s->refcount++;
63902 + atomic_inc(&s->refcount);
63903 /*
63904 * Adjust the object sizes so that we clear
63905 * the complete object on kzalloc.
63906 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
63907 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
63908
63909 if (sysfs_slab_alias(s, name)) {
63910 - s->refcount--;
63911 + atomic_dec(&s->refcount);
63912 goto err;
63913 }
63914 up_write(&slub_lock);
63915 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
63916
63917 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
63918 {
63919 - return sprintf(buf, "%d\n", s->refcount - 1);
63920 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
63921 }
63922 SLAB_ATTR_RO(aliases);
63923
63924 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
63925
63926 static int __init slab_proc_init(void)
63927 {
63928 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
63929 + mode_t gr_mode = S_IRUGO;
63930 +
63931 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63932 + gr_mode = S_IRUSR;
63933 +#endif
63934 +
63935 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
63936 return 0;
63937 }
63938 module_init(slab_proc_init);
63939 diff -urNp linux-3.0.3/mm/swap.c linux-3.0.3/mm/swap.c
63940 --- linux-3.0.3/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
63941 +++ linux-3.0.3/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
63942 @@ -31,6 +31,7 @@
63943 #include <linux/backing-dev.h>
63944 #include <linux/memcontrol.h>
63945 #include <linux/gfp.h>
63946 +#include <linux/hugetlb.h>
63947
63948 #include "internal.h"
63949
63950 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
63951
63952 __page_cache_release(page);
63953 dtor = get_compound_page_dtor(page);
63954 + if (!PageHuge(page))
63955 + BUG_ON(dtor != free_compound_page);
63956 (*dtor)(page);
63957 }
63958
63959 diff -urNp linux-3.0.3/mm/swapfile.c linux-3.0.3/mm/swapfile.c
63960 --- linux-3.0.3/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
63961 +++ linux-3.0.3/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
63962 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
63963
63964 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
63965 /* Activity counter to indicate that a swapon or swapoff has occurred */
63966 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
63967 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
63968
63969 static inline unsigned char swap_count(unsigned char ent)
63970 {
63971 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
63972 }
63973 filp_close(swap_file, NULL);
63974 err = 0;
63975 - atomic_inc(&proc_poll_event);
63976 + atomic_inc_unchecked(&proc_poll_event);
63977 wake_up_interruptible(&proc_poll_wait);
63978
63979 out_dput:
63980 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
63981
63982 poll_wait(file, &proc_poll_wait, wait);
63983
63984 - if (s->event != atomic_read(&proc_poll_event)) {
63985 - s->event = atomic_read(&proc_poll_event);
63986 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
63987 + s->event = atomic_read_unchecked(&proc_poll_event);
63988 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
63989 }
63990
63991 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
63992 }
63993
63994 s->seq.private = s;
63995 - s->event = atomic_read(&proc_poll_event);
63996 + s->event = atomic_read_unchecked(&proc_poll_event);
63997 return ret;
63998 }
63999
64000 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64001 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64002
64003 mutex_unlock(&swapon_mutex);
64004 - atomic_inc(&proc_poll_event);
64005 + atomic_inc_unchecked(&proc_poll_event);
64006 wake_up_interruptible(&proc_poll_wait);
64007
64008 if (S_ISREG(inode->i_mode))
64009 diff -urNp linux-3.0.3/mm/util.c linux-3.0.3/mm/util.c
64010 --- linux-3.0.3/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64011 +++ linux-3.0.3/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64012 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64013 * allocated buffer. Use this if you don't want to free the buffer immediately
64014 * like, for example, with RCU.
64015 */
64016 +#undef __krealloc
64017 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64018 {
64019 void *ret;
64020 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64021 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64022 * %NULL pointer, the object pointed to is freed.
64023 */
64024 +#undef krealloc
64025 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64026 {
64027 void *ret;
64028 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64029 void arch_pick_mmap_layout(struct mm_struct *mm)
64030 {
64031 mm->mmap_base = TASK_UNMAPPED_BASE;
64032 +
64033 +#ifdef CONFIG_PAX_RANDMMAP
64034 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64035 + mm->mmap_base += mm->delta_mmap;
64036 +#endif
64037 +
64038 mm->get_unmapped_area = arch_get_unmapped_area;
64039 mm->unmap_area = arch_unmap_area;
64040 }
64041 diff -urNp linux-3.0.3/mm/vmalloc.c linux-3.0.3/mm/vmalloc.c
64042 --- linux-3.0.3/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64043 +++ linux-3.0.3/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64044 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64045
64046 pte = pte_offset_kernel(pmd, addr);
64047 do {
64048 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64049 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64050 +
64051 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64052 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64053 + BUG_ON(!pte_exec(*pte));
64054 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64055 + continue;
64056 + }
64057 +#endif
64058 +
64059 + {
64060 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64061 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64062 + }
64063 } while (pte++, addr += PAGE_SIZE, addr != end);
64064 }
64065
64066 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64067 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64068 {
64069 pte_t *pte;
64070 + int ret = -ENOMEM;
64071
64072 /*
64073 * nr is a running index into the array which helps higher level
64074 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64075 pte = pte_alloc_kernel(pmd, addr);
64076 if (!pte)
64077 return -ENOMEM;
64078 +
64079 + pax_open_kernel();
64080 do {
64081 struct page *page = pages[*nr];
64082
64083 - if (WARN_ON(!pte_none(*pte)))
64084 - return -EBUSY;
64085 - if (WARN_ON(!page))
64086 - return -ENOMEM;
64087 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64088 + if (pgprot_val(prot) & _PAGE_NX)
64089 +#endif
64090 +
64091 + if (WARN_ON(!pte_none(*pte))) {
64092 + ret = -EBUSY;
64093 + goto out;
64094 + }
64095 + if (WARN_ON(!page)) {
64096 + ret = -ENOMEM;
64097 + goto out;
64098 + }
64099 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64100 (*nr)++;
64101 } while (pte++, addr += PAGE_SIZE, addr != end);
64102 - return 0;
64103 + ret = 0;
64104 +out:
64105 + pax_close_kernel();
64106 + return ret;
64107 }
64108
64109 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64110 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64111 * and fall back on vmalloc() if that fails. Others
64112 * just put it in the vmalloc space.
64113 */
64114 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64115 +#ifdef CONFIG_MODULES
64116 +#ifdef MODULES_VADDR
64117 unsigned long addr = (unsigned long)x;
64118 if (addr >= MODULES_VADDR && addr < MODULES_END)
64119 return 1;
64120 #endif
64121 +
64122 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64123 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64124 + return 1;
64125 +#endif
64126 +
64127 +#endif
64128 +
64129 return is_vmalloc_addr(x);
64130 }
64131
64132 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64133
64134 if (!pgd_none(*pgd)) {
64135 pud_t *pud = pud_offset(pgd, addr);
64136 +#ifdef CONFIG_X86
64137 + if (!pud_large(*pud))
64138 +#endif
64139 if (!pud_none(*pud)) {
64140 pmd_t *pmd = pmd_offset(pud, addr);
64141 +#ifdef CONFIG_X86
64142 + if (!pmd_large(*pmd))
64143 +#endif
64144 if (!pmd_none(*pmd)) {
64145 pte_t *ptep, pte;
64146
64147 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64148 struct vm_struct *area;
64149
64150 BUG_ON(in_interrupt());
64151 +
64152 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64153 + if (flags & VM_KERNEXEC) {
64154 + if (start != VMALLOC_START || end != VMALLOC_END)
64155 + return NULL;
64156 + start = (unsigned long)MODULES_EXEC_VADDR;
64157 + end = (unsigned long)MODULES_EXEC_END;
64158 + }
64159 +#endif
64160 +
64161 if (flags & VM_IOREMAP) {
64162 int bit = fls(size);
64163
64164 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64165 if (count > totalram_pages)
64166 return NULL;
64167
64168 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64169 + if (!(pgprot_val(prot) & _PAGE_NX))
64170 + flags |= VM_KERNEXEC;
64171 +#endif
64172 +
64173 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64174 __builtin_return_address(0));
64175 if (!area)
64176 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64177 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64178 return NULL;
64179
64180 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64181 + if (!(pgprot_val(prot) & _PAGE_NX))
64182 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64183 + node, gfp_mask, caller);
64184 + else
64185 +#endif
64186 +
64187 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64188 gfp_mask, caller);
64189
64190 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64191 gfp_mask, prot, node, caller);
64192 }
64193
64194 +#undef __vmalloc
64195 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64196 {
64197 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64198 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64199 * For tight control over page level allocator and protection flags
64200 * use __vmalloc() instead.
64201 */
64202 +#undef vmalloc
64203 void *vmalloc(unsigned long size)
64204 {
64205 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64206 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64207 * For tight control over page level allocator and protection flags
64208 * use __vmalloc() instead.
64209 */
64210 +#undef vzalloc
64211 void *vzalloc(unsigned long size)
64212 {
64213 return __vmalloc_node_flags(size, -1,
64214 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64215 * The resulting memory area is zeroed so it can be mapped to userspace
64216 * without leaking data.
64217 */
64218 +#undef vmalloc_user
64219 void *vmalloc_user(unsigned long size)
64220 {
64221 struct vm_struct *area;
64222 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64223 * For tight control over page level allocator and protection flags
64224 * use __vmalloc() instead.
64225 */
64226 +#undef vmalloc_node
64227 void *vmalloc_node(unsigned long size, int node)
64228 {
64229 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64230 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64231 * For tight control over page level allocator and protection flags
64232 * use __vmalloc_node() instead.
64233 */
64234 +#undef vzalloc_node
64235 void *vzalloc_node(unsigned long size, int node)
64236 {
64237 return __vmalloc_node_flags(size, node,
64238 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64239 * For tight control over page level allocator and protection flags
64240 * use __vmalloc() instead.
64241 */
64242 -
64243 +#undef vmalloc_exec
64244 void *vmalloc_exec(unsigned long size)
64245 {
64246 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64247 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64248 -1, __builtin_return_address(0));
64249 }
64250
64251 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64252 * Allocate enough 32bit PA addressable pages to cover @size from the
64253 * page level allocator and map them into contiguous kernel virtual space.
64254 */
64255 +#undef vmalloc_32
64256 void *vmalloc_32(unsigned long size)
64257 {
64258 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64259 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64260 * The resulting memory area is 32bit addressable and zeroed so it can be
64261 * mapped to userspace without leaking data.
64262 */
64263 +#undef vmalloc_32_user
64264 void *vmalloc_32_user(unsigned long size)
64265 {
64266 struct vm_struct *area;
64267 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64268 unsigned long uaddr = vma->vm_start;
64269 unsigned long usize = vma->vm_end - vma->vm_start;
64270
64271 + BUG_ON(vma->vm_mirror);
64272 +
64273 if ((PAGE_SIZE-1) & (unsigned long)addr)
64274 return -EINVAL;
64275
64276 diff -urNp linux-3.0.3/mm/vmstat.c linux-3.0.3/mm/vmstat.c
64277 --- linux-3.0.3/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64278 +++ linux-3.0.3/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64279 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64280 *
64281 * vm_stat contains the global counters
64282 */
64283 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64284 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64285 EXPORT_SYMBOL(vm_stat);
64286
64287 #ifdef CONFIG_SMP
64288 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64289 v = p->vm_stat_diff[i];
64290 p->vm_stat_diff[i] = 0;
64291 local_irq_restore(flags);
64292 - atomic_long_add(v, &zone->vm_stat[i]);
64293 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64294 global_diff[i] += v;
64295 #ifdef CONFIG_NUMA
64296 /* 3 seconds idle till flush */
64297 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64298
64299 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64300 if (global_diff[i])
64301 - atomic_long_add(global_diff[i], &vm_stat[i]);
64302 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64303 }
64304
64305 #endif
64306 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64307 start_cpu_timer(cpu);
64308 #endif
64309 #ifdef CONFIG_PROC_FS
64310 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64311 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64312 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64313 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64314 + {
64315 + mode_t gr_mode = S_IRUGO;
64316 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64317 + gr_mode = S_IRUSR;
64318 +#endif
64319 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64320 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64321 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64322 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64323 +#else
64324 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64325 +#endif
64326 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64327 + }
64328 #endif
64329 return 0;
64330 }
64331 diff -urNp linux-3.0.3/net/8021q/vlan.c linux-3.0.3/net/8021q/vlan.c
64332 --- linux-3.0.3/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64333 +++ linux-3.0.3/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64334 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64335 err = -EPERM;
64336 if (!capable(CAP_NET_ADMIN))
64337 break;
64338 - if ((args.u.name_type >= 0) &&
64339 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64340 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64341 struct vlan_net *vn;
64342
64343 vn = net_generic(net, vlan_net_id);
64344 diff -urNp linux-3.0.3/net/atm/atm_misc.c linux-3.0.3/net/atm/atm_misc.c
64345 --- linux-3.0.3/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64346 +++ linux-3.0.3/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64347 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64348 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64349 return 1;
64350 atm_return(vcc, truesize);
64351 - atomic_inc(&vcc->stats->rx_drop);
64352 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64353 return 0;
64354 }
64355 EXPORT_SYMBOL(atm_charge);
64356 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64357 }
64358 }
64359 atm_return(vcc, guess);
64360 - atomic_inc(&vcc->stats->rx_drop);
64361 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64362 return NULL;
64363 }
64364 EXPORT_SYMBOL(atm_alloc_charge);
64365 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64366
64367 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64368 {
64369 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64370 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64371 __SONET_ITEMS
64372 #undef __HANDLE_ITEM
64373 }
64374 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64375
64376 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64377 {
64378 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64379 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64380 __SONET_ITEMS
64381 #undef __HANDLE_ITEM
64382 }
64383 diff -urNp linux-3.0.3/net/atm/lec.h linux-3.0.3/net/atm/lec.h
64384 --- linux-3.0.3/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64385 +++ linux-3.0.3/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64386 @@ -48,7 +48,7 @@ struct lane2_ops {
64387 const u8 *tlvs, u32 sizeoftlvs);
64388 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64389 const u8 *tlvs, u32 sizeoftlvs);
64390 -};
64391 +} __no_const;
64392
64393 /*
64394 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64395 diff -urNp linux-3.0.3/net/atm/mpc.h linux-3.0.3/net/atm/mpc.h
64396 --- linux-3.0.3/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64397 +++ linux-3.0.3/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64398 @@ -33,7 +33,7 @@ struct mpoa_client {
64399 struct mpc_parameters parameters; /* parameters for this client */
64400
64401 const struct net_device_ops *old_ops;
64402 - struct net_device_ops new_ops;
64403 + net_device_ops_no_const new_ops;
64404 };
64405
64406
64407 diff -urNp linux-3.0.3/net/atm/mpoa_caches.c linux-3.0.3/net/atm/mpoa_caches.c
64408 --- linux-3.0.3/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64409 +++ linux-3.0.3/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64410 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64411 struct timeval now;
64412 struct k_message msg;
64413
64414 + pax_track_stack();
64415 +
64416 do_gettimeofday(&now);
64417
64418 read_lock_bh(&client->ingress_lock);
64419 diff -urNp linux-3.0.3/net/atm/proc.c linux-3.0.3/net/atm/proc.c
64420 --- linux-3.0.3/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64421 +++ linux-3.0.3/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64422 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64423 const struct k_atm_aal_stats *stats)
64424 {
64425 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64426 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64427 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64428 - atomic_read(&stats->rx_drop));
64429 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64430 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64431 + atomic_read_unchecked(&stats->rx_drop));
64432 }
64433
64434 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64435 diff -urNp linux-3.0.3/net/atm/resources.c linux-3.0.3/net/atm/resources.c
64436 --- linux-3.0.3/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64437 +++ linux-3.0.3/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64438 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64439 static void copy_aal_stats(struct k_atm_aal_stats *from,
64440 struct atm_aal_stats *to)
64441 {
64442 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64443 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64444 __AAL_STAT_ITEMS
64445 #undef __HANDLE_ITEM
64446 }
64447 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64448 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64449 struct atm_aal_stats *to)
64450 {
64451 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64452 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64453 __AAL_STAT_ITEMS
64454 #undef __HANDLE_ITEM
64455 }
64456 diff -urNp linux-3.0.3/net/batman-adv/hard-interface.c linux-3.0.3/net/batman-adv/hard-interface.c
64457 --- linux-3.0.3/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64458 +++ linux-3.0.3/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64459 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64460 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64461 dev_add_pack(&hard_iface->batman_adv_ptype);
64462
64463 - atomic_set(&hard_iface->seqno, 1);
64464 - atomic_set(&hard_iface->frag_seqno, 1);
64465 + atomic_set_unchecked(&hard_iface->seqno, 1);
64466 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64467 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64468 hard_iface->net_dev->name);
64469
64470 diff -urNp linux-3.0.3/net/batman-adv/routing.c linux-3.0.3/net/batman-adv/routing.c
64471 --- linux-3.0.3/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64472 +++ linux-3.0.3/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64473 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64474 return;
64475
64476 /* could be changed by schedule_own_packet() */
64477 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
64478 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64479
64480 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64481
64482 diff -urNp linux-3.0.3/net/batman-adv/send.c linux-3.0.3/net/batman-adv/send.c
64483 --- linux-3.0.3/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64484 +++ linux-3.0.3/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64485 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64486
64487 /* change sequence number to network order */
64488 batman_packet->seqno =
64489 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
64490 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64491
64492 if (vis_server == VIS_TYPE_SERVER_SYNC)
64493 batman_packet->flags |= VIS_SERVER;
64494 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64495 else
64496 batman_packet->gw_flags = 0;
64497
64498 - atomic_inc(&hard_iface->seqno);
64499 + atomic_inc_unchecked(&hard_iface->seqno);
64500
64501 slide_own_bcast_window(hard_iface);
64502 send_time = own_send_time(bat_priv);
64503 diff -urNp linux-3.0.3/net/batman-adv/soft-interface.c linux-3.0.3/net/batman-adv/soft-interface.c
64504 --- linux-3.0.3/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64505 +++ linux-3.0.3/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64506 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64507
64508 /* set broadcast sequence number */
64509 bcast_packet->seqno =
64510 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64511 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64512
64513 add_bcast_packet_to_list(bat_priv, skb);
64514
64515 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64516 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64517
64518 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64519 - atomic_set(&bat_priv->bcast_seqno, 1);
64520 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64521 atomic_set(&bat_priv->tt_local_changed, 0);
64522
64523 bat_priv->primary_if = NULL;
64524 diff -urNp linux-3.0.3/net/batman-adv/types.h linux-3.0.3/net/batman-adv/types.h
64525 --- linux-3.0.3/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64526 +++ linux-3.0.3/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64527 @@ -38,8 +38,8 @@ struct hard_iface {
64528 int16_t if_num;
64529 char if_status;
64530 struct net_device *net_dev;
64531 - atomic_t seqno;
64532 - atomic_t frag_seqno;
64533 + atomic_unchecked_t seqno;
64534 + atomic_unchecked_t frag_seqno;
64535 unsigned char *packet_buff;
64536 int packet_len;
64537 struct kobject *hardif_obj;
64538 @@ -142,7 +142,7 @@ struct bat_priv {
64539 atomic_t orig_interval; /* uint */
64540 atomic_t hop_penalty; /* uint */
64541 atomic_t log_level; /* uint */
64542 - atomic_t bcast_seqno;
64543 + atomic_unchecked_t bcast_seqno;
64544 atomic_t bcast_queue_left;
64545 atomic_t batman_queue_left;
64546 char num_ifaces;
64547 diff -urNp linux-3.0.3/net/batman-adv/unicast.c linux-3.0.3/net/batman-adv/unicast.c
64548 --- linux-3.0.3/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64549 +++ linux-3.0.3/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64550 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64551 frag1->flags = UNI_FRAG_HEAD | large_tail;
64552 frag2->flags = large_tail;
64553
64554 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64555 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64556 frag1->seqno = htons(seqno - 1);
64557 frag2->seqno = htons(seqno);
64558
64559 diff -urNp linux-3.0.3/net/bridge/br_multicast.c linux-3.0.3/net/bridge/br_multicast.c
64560 --- linux-3.0.3/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64561 +++ linux-3.0.3/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64562 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64563 nexthdr = ip6h->nexthdr;
64564 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64565
64566 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64567 + if (nexthdr != IPPROTO_ICMPV6)
64568 return 0;
64569
64570 /* Okay, we found ICMPv6 header */
64571 diff -urNp linux-3.0.3/net/bridge/netfilter/ebtables.c linux-3.0.3/net/bridge/netfilter/ebtables.c
64572 --- linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64573 +++ linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64574 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64575 tmp.valid_hooks = t->table->valid_hooks;
64576 }
64577 mutex_unlock(&ebt_mutex);
64578 - if (copy_to_user(user, &tmp, *len) != 0){
64579 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64580 BUGPRINT("c2u Didn't work\n");
64581 ret = -EFAULT;
64582 break;
64583 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64584 int ret;
64585 void __user *pos;
64586
64587 + pax_track_stack();
64588 +
64589 memset(&tinfo, 0, sizeof(tinfo));
64590
64591 if (cmd == EBT_SO_GET_ENTRIES) {
64592 diff -urNp linux-3.0.3/net/caif/caif_socket.c linux-3.0.3/net/caif/caif_socket.c
64593 --- linux-3.0.3/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64594 +++ linux-3.0.3/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64595 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64596 #ifdef CONFIG_DEBUG_FS
64597 struct debug_fs_counter {
64598 atomic_t caif_nr_socks;
64599 - atomic_t caif_sock_create;
64600 - atomic_t num_connect_req;
64601 - atomic_t num_connect_resp;
64602 - atomic_t num_connect_fail_resp;
64603 - atomic_t num_disconnect;
64604 - atomic_t num_remote_shutdown_ind;
64605 - atomic_t num_tx_flow_off_ind;
64606 - atomic_t num_tx_flow_on_ind;
64607 - atomic_t num_rx_flow_off;
64608 - atomic_t num_rx_flow_on;
64609 + atomic_unchecked_t caif_sock_create;
64610 + atomic_unchecked_t num_connect_req;
64611 + atomic_unchecked_t num_connect_resp;
64612 + atomic_unchecked_t num_connect_fail_resp;
64613 + atomic_unchecked_t num_disconnect;
64614 + atomic_unchecked_t num_remote_shutdown_ind;
64615 + atomic_unchecked_t num_tx_flow_off_ind;
64616 + atomic_unchecked_t num_tx_flow_on_ind;
64617 + atomic_unchecked_t num_rx_flow_off;
64618 + atomic_unchecked_t num_rx_flow_on;
64619 };
64620 static struct debug_fs_counter cnt;
64621 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64622 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64623 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64624 #else
64625 #define dbfs_atomic_inc(v) 0
64626 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64627 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64628 sk_rcvbuf_lowwater(cf_sk));
64629 set_rx_flow_off(cf_sk);
64630 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64631 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64632 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64633 }
64634
64635 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64636 set_rx_flow_off(cf_sk);
64637 if (net_ratelimit())
64638 pr_debug("sending flow OFF due to rmem_schedule\n");
64639 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64640 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64641 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64642 }
64643 skb->dev = NULL;
64644 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64645 switch (flow) {
64646 case CAIF_CTRLCMD_FLOW_ON_IND:
64647 /* OK from modem to start sending again */
64648 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64649 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64650 set_tx_flow_on(cf_sk);
64651 cf_sk->sk.sk_state_change(&cf_sk->sk);
64652 break;
64653
64654 case CAIF_CTRLCMD_FLOW_OFF_IND:
64655 /* Modem asks us to shut up */
64656 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
64657 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
64658 set_tx_flow_off(cf_sk);
64659 cf_sk->sk.sk_state_change(&cf_sk->sk);
64660 break;
64661 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
64662 /* We're now connected */
64663 caif_client_register_refcnt(&cf_sk->layer,
64664 cfsk_hold, cfsk_put);
64665 - dbfs_atomic_inc(&cnt.num_connect_resp);
64666 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
64667 cf_sk->sk.sk_state = CAIF_CONNECTED;
64668 set_tx_flow_on(cf_sk);
64669 cf_sk->sk.sk_state_change(&cf_sk->sk);
64670 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
64671
64672 case CAIF_CTRLCMD_INIT_FAIL_RSP:
64673 /* Connect request failed */
64674 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
64675 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
64676 cf_sk->sk.sk_err = ECONNREFUSED;
64677 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
64678 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64679 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
64680
64681 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
64682 /* Modem has closed this connection, or device is down. */
64683 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
64684 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
64685 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64686 cf_sk->sk.sk_err = ECONNRESET;
64687 set_rx_flow_on(cf_sk);
64688 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
64689 return;
64690
64691 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
64692 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
64693 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
64694 set_rx_flow_on(cf_sk);
64695 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
64696 }
64697 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
64698 /*ifindex = id of the interface.*/
64699 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
64700
64701 - dbfs_atomic_inc(&cnt.num_connect_req);
64702 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
64703 cf_sk->layer.receive = caif_sktrecv_cb;
64704
64705 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
64706 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
64707 spin_unlock_bh(&sk->sk_receive_queue.lock);
64708 sock->sk = NULL;
64709
64710 - dbfs_atomic_inc(&cnt.num_disconnect);
64711 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
64712
64713 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
64714 if (cf_sk->debugfs_socket_dir != NULL)
64715 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
64716 cf_sk->conn_req.protocol = protocol;
64717 /* Increase the number of sockets created. */
64718 dbfs_atomic_inc(&cnt.caif_nr_socks);
64719 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
64720 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
64721 #ifdef CONFIG_DEBUG_FS
64722 if (!IS_ERR(debugfsdir)) {
64723
64724 diff -urNp linux-3.0.3/net/caif/cfctrl.c linux-3.0.3/net/caif/cfctrl.c
64725 --- linux-3.0.3/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
64726 +++ linux-3.0.3/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
64727 @@ -9,6 +9,7 @@
64728 #include <linux/stddef.h>
64729 #include <linux/spinlock.h>
64730 #include <linux/slab.h>
64731 +#include <linux/sched.h>
64732 #include <net/caif/caif_layer.h>
64733 #include <net/caif/cfpkt.h>
64734 #include <net/caif/cfctrl.h>
64735 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
64736 dev_info.id = 0xff;
64737 memset(this, 0, sizeof(*this));
64738 cfsrvl_init(&this->serv, 0, &dev_info, false);
64739 - atomic_set(&this->req_seq_no, 1);
64740 - atomic_set(&this->rsp_seq_no, 1);
64741 + atomic_set_unchecked(&this->req_seq_no, 1);
64742 + atomic_set_unchecked(&this->rsp_seq_no, 1);
64743 this->serv.layer.receive = cfctrl_recv;
64744 sprintf(this->serv.layer.name, "ctrl");
64745 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
64746 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
64747 struct cfctrl_request_info *req)
64748 {
64749 spin_lock_bh(&ctrl->info_list_lock);
64750 - atomic_inc(&ctrl->req_seq_no);
64751 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
64752 + atomic_inc_unchecked(&ctrl->req_seq_no);
64753 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
64754 list_add_tail(&req->list, &ctrl->list);
64755 spin_unlock_bh(&ctrl->info_list_lock);
64756 }
64757 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
64758 if (p != first)
64759 pr_warn("Requests are not received in order\n");
64760
64761 - atomic_set(&ctrl->rsp_seq_no,
64762 + atomic_set_unchecked(&ctrl->rsp_seq_no,
64763 p->sequence_no);
64764 list_del(&p->list);
64765 goto out;
64766 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
64767 struct cfctrl *cfctrl = container_obj(layer);
64768 struct cfctrl_request_info rsp, *req;
64769
64770 + pax_track_stack();
64771
64772 cfpkt_extr_head(pkt, &cmdrsp, 1);
64773 cmd = cmdrsp & CFCTRL_CMD_MASK;
64774 diff -urNp linux-3.0.3/net/core/datagram.c linux-3.0.3/net/core/datagram.c
64775 --- linux-3.0.3/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
64776 +++ linux-3.0.3/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
64777 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
64778 }
64779
64780 kfree_skb(skb);
64781 - atomic_inc(&sk->sk_drops);
64782 + atomic_inc_unchecked(&sk->sk_drops);
64783 sk_mem_reclaim_partial(sk);
64784
64785 return err;
64786 diff -urNp linux-3.0.3/net/core/dev.c linux-3.0.3/net/core/dev.c
64787 --- linux-3.0.3/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
64788 +++ linux-3.0.3/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
64789 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
64790 if (no_module && capable(CAP_NET_ADMIN))
64791 no_module = request_module("netdev-%s", name);
64792 if (no_module && capable(CAP_SYS_MODULE)) {
64793 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64794 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
64795 +#else
64796 if (!request_module("%s", name))
64797 pr_err("Loading kernel module for a network device "
64798 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
64799 "instead\n", name);
64800 +#endif
64801 }
64802 }
64803 EXPORT_SYMBOL(dev_load);
64804 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
64805
64806 struct dev_gso_cb {
64807 void (*destructor)(struct sk_buff *skb);
64808 -};
64809 +} __no_const;
64810
64811 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
64812
64813 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
64814 }
64815 EXPORT_SYMBOL(netif_rx_ni);
64816
64817 -static void net_tx_action(struct softirq_action *h)
64818 +static void net_tx_action(void)
64819 {
64820 struct softnet_data *sd = &__get_cpu_var(softnet_data);
64821
64822 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
64823 }
64824 EXPORT_SYMBOL(netif_napi_del);
64825
64826 -static void net_rx_action(struct softirq_action *h)
64827 +static void net_rx_action(void)
64828 {
64829 struct softnet_data *sd = &__get_cpu_var(softnet_data);
64830 unsigned long time_limit = jiffies + 2;
64831 diff -urNp linux-3.0.3/net/core/flow.c linux-3.0.3/net/core/flow.c
64832 --- linux-3.0.3/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
64833 +++ linux-3.0.3/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
64834 @@ -60,7 +60,7 @@ struct flow_cache {
64835 struct timer_list rnd_timer;
64836 };
64837
64838 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
64839 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
64840 EXPORT_SYMBOL(flow_cache_genid);
64841 static struct flow_cache flow_cache_global;
64842 static struct kmem_cache *flow_cachep __read_mostly;
64843 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
64844
64845 static int flow_entry_valid(struct flow_cache_entry *fle)
64846 {
64847 - if (atomic_read(&flow_cache_genid) != fle->genid)
64848 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
64849 return 0;
64850 if (fle->object && !fle->object->ops->check(fle->object))
64851 return 0;
64852 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
64853 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
64854 fcp->hash_count++;
64855 }
64856 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
64857 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
64858 flo = fle->object;
64859 if (!flo)
64860 goto ret_object;
64861 @@ -274,7 +274,7 @@ nocache:
64862 }
64863 flo = resolver(net, key, family, dir, flo, ctx);
64864 if (fle) {
64865 - fle->genid = atomic_read(&flow_cache_genid);
64866 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
64867 if (!IS_ERR(flo))
64868 fle->object = flo;
64869 else
64870 diff -urNp linux-3.0.3/net/core/rtnetlink.c linux-3.0.3/net/core/rtnetlink.c
64871 --- linux-3.0.3/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
64872 +++ linux-3.0.3/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
64873 @@ -56,7 +56,7 @@
64874 struct rtnl_link {
64875 rtnl_doit_func doit;
64876 rtnl_dumpit_func dumpit;
64877 -};
64878 +} __no_const;
64879
64880 static DEFINE_MUTEX(rtnl_mutex);
64881
64882 diff -urNp linux-3.0.3/net/core/skbuff.c linux-3.0.3/net/core/skbuff.c
64883 --- linux-3.0.3/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
64884 +++ linux-3.0.3/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
64885 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
64886 struct sock *sk = skb->sk;
64887 int ret = 0;
64888
64889 + pax_track_stack();
64890 +
64891 if (splice_grow_spd(pipe, &spd))
64892 return -ENOMEM;
64893
64894 diff -urNp linux-3.0.3/net/core/sock.c linux-3.0.3/net/core/sock.c
64895 --- linux-3.0.3/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
64896 +++ linux-3.0.3/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
64897 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
64898 */
64899 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
64900 (unsigned)sk->sk_rcvbuf) {
64901 - atomic_inc(&sk->sk_drops);
64902 + atomic_inc_unchecked(&sk->sk_drops);
64903 return -ENOMEM;
64904 }
64905
64906 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
64907 return err;
64908
64909 if (!sk_rmem_schedule(sk, skb->truesize)) {
64910 - atomic_inc(&sk->sk_drops);
64911 + atomic_inc_unchecked(&sk->sk_drops);
64912 return -ENOBUFS;
64913 }
64914
64915 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
64916 skb_dst_force(skb);
64917
64918 spin_lock_irqsave(&list->lock, flags);
64919 - skb->dropcount = atomic_read(&sk->sk_drops);
64920 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
64921 __skb_queue_tail(list, skb);
64922 spin_unlock_irqrestore(&list->lock, flags);
64923
64924 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
64925 skb->dev = NULL;
64926
64927 if (sk_rcvqueues_full(sk, skb)) {
64928 - atomic_inc(&sk->sk_drops);
64929 + atomic_inc_unchecked(&sk->sk_drops);
64930 goto discard_and_relse;
64931 }
64932 if (nested)
64933 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
64934 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
64935 } else if (sk_add_backlog(sk, skb)) {
64936 bh_unlock_sock(sk);
64937 - atomic_inc(&sk->sk_drops);
64938 + atomic_inc_unchecked(&sk->sk_drops);
64939 goto discard_and_relse;
64940 }
64941
64942 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
64943 if (len > sizeof(peercred))
64944 len = sizeof(peercred);
64945 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
64946 - if (copy_to_user(optval, &peercred, len))
64947 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
64948 return -EFAULT;
64949 goto lenout;
64950 }
64951 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
64952 return -ENOTCONN;
64953 if (lv < len)
64954 return -EINVAL;
64955 - if (copy_to_user(optval, address, len))
64956 + if (len > sizeof(address) || copy_to_user(optval, address, len))
64957 return -EFAULT;
64958 goto lenout;
64959 }
64960 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
64961
64962 if (len > lv)
64963 len = lv;
64964 - if (copy_to_user(optval, &v, len))
64965 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
64966 return -EFAULT;
64967 lenout:
64968 if (put_user(len, optlen))
64969 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
64970 */
64971 smp_wmb();
64972 atomic_set(&sk->sk_refcnt, 1);
64973 - atomic_set(&sk->sk_drops, 0);
64974 + atomic_set_unchecked(&sk->sk_drops, 0);
64975 }
64976 EXPORT_SYMBOL(sock_init_data);
64977
64978 diff -urNp linux-3.0.3/net/decnet/sysctl_net_decnet.c linux-3.0.3/net/decnet/sysctl_net_decnet.c
64979 --- linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
64980 +++ linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
64981 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
64982
64983 if (len > *lenp) len = *lenp;
64984
64985 - if (copy_to_user(buffer, addr, len))
64986 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
64987 return -EFAULT;
64988
64989 *lenp = len;
64990 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
64991
64992 if (len > *lenp) len = *lenp;
64993
64994 - if (copy_to_user(buffer, devname, len))
64995 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
64996 return -EFAULT;
64997
64998 *lenp = len;
64999 diff -urNp linux-3.0.3/net/econet/Kconfig linux-3.0.3/net/econet/Kconfig
65000 --- linux-3.0.3/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65001 +++ linux-3.0.3/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65002 @@ -4,7 +4,7 @@
65003
65004 config ECONET
65005 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65006 - depends on EXPERIMENTAL && INET
65007 + depends on EXPERIMENTAL && INET && BROKEN
65008 ---help---
65009 Econet is a fairly old and slow networking protocol mainly used by
65010 Acorn computers to access file and print servers. It uses native
65011 diff -urNp linux-3.0.3/net/ipv4/fib_frontend.c linux-3.0.3/net/ipv4/fib_frontend.c
65012 --- linux-3.0.3/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65013 +++ linux-3.0.3/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65014 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65015 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65016 fib_sync_up(dev);
65017 #endif
65018 - atomic_inc(&net->ipv4.dev_addr_genid);
65019 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65020 rt_cache_flush(dev_net(dev), -1);
65021 break;
65022 case NETDEV_DOWN:
65023 fib_del_ifaddr(ifa, NULL);
65024 - atomic_inc(&net->ipv4.dev_addr_genid);
65025 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65026 if (ifa->ifa_dev->ifa_list == NULL) {
65027 /* Last address was deleted from this interface.
65028 * Disable IP.
65029 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65030 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65031 fib_sync_up(dev);
65032 #endif
65033 - atomic_inc(&net->ipv4.dev_addr_genid);
65034 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65035 rt_cache_flush(dev_net(dev), -1);
65036 break;
65037 case NETDEV_DOWN:
65038 diff -urNp linux-3.0.3/net/ipv4/fib_semantics.c linux-3.0.3/net/ipv4/fib_semantics.c
65039 --- linux-3.0.3/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65040 +++ linux-3.0.3/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65041 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65042 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65043 nh->nh_gw,
65044 nh->nh_parent->fib_scope);
65045 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65046 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65047
65048 return nh->nh_saddr;
65049 }
65050 diff -urNp linux-3.0.3/net/ipv4/inet_diag.c linux-3.0.3/net/ipv4/inet_diag.c
65051 --- linux-3.0.3/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65052 +++ linux-3.0.3/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65053 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65054 r->idiag_retrans = 0;
65055
65056 r->id.idiag_if = sk->sk_bound_dev_if;
65057 +
65058 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65059 + r->id.idiag_cookie[0] = 0;
65060 + r->id.idiag_cookie[1] = 0;
65061 +#else
65062 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65063 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65064 +#endif
65065
65066 r->id.idiag_sport = inet->inet_sport;
65067 r->id.idiag_dport = inet->inet_dport;
65068 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65069 r->idiag_family = tw->tw_family;
65070 r->idiag_retrans = 0;
65071 r->id.idiag_if = tw->tw_bound_dev_if;
65072 +
65073 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65074 + r->id.idiag_cookie[0] = 0;
65075 + r->id.idiag_cookie[1] = 0;
65076 +#else
65077 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65078 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65079 +#endif
65080 +
65081 r->id.idiag_sport = tw->tw_sport;
65082 r->id.idiag_dport = tw->tw_dport;
65083 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65084 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65085 if (sk == NULL)
65086 goto unlock;
65087
65088 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65089 err = -ESTALE;
65090 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65091 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65092 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65093 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65094 goto out;
65095 +#endif
65096
65097 err = -ENOMEM;
65098 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65099 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65100 r->idiag_retrans = req->retrans;
65101
65102 r->id.idiag_if = sk->sk_bound_dev_if;
65103 +
65104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65105 + r->id.idiag_cookie[0] = 0;
65106 + r->id.idiag_cookie[1] = 0;
65107 +#else
65108 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65109 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65110 +#endif
65111
65112 tmo = req->expires - jiffies;
65113 if (tmo < 0)
65114 diff -urNp linux-3.0.3/net/ipv4/inet_hashtables.c linux-3.0.3/net/ipv4/inet_hashtables.c
65115 --- linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65116 +++ linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65117 @@ -18,12 +18,15 @@
65118 #include <linux/sched.h>
65119 #include <linux/slab.h>
65120 #include <linux/wait.h>
65121 +#include <linux/security.h>
65122
65123 #include <net/inet_connection_sock.h>
65124 #include <net/inet_hashtables.h>
65125 #include <net/secure_seq.h>
65126 #include <net/ip.h>
65127
65128 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65129 +
65130 /*
65131 * Allocate and initialize a new local port bind bucket.
65132 * The bindhash mutex for snum's hash chain must be held here.
65133 @@ -530,6 +533,8 @@ ok:
65134 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65135 spin_unlock(&head->lock);
65136
65137 + gr_update_task_in_ip_table(current, inet_sk(sk));
65138 +
65139 if (tw) {
65140 inet_twsk_deschedule(tw, death_row);
65141 while (twrefcnt) {
65142 diff -urNp linux-3.0.3/net/ipv4/inetpeer.c linux-3.0.3/net/ipv4/inetpeer.c
65143 --- linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65144 +++ linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65145 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65146 unsigned int sequence;
65147 int invalidated, newrefcnt = 0;
65148
65149 + pax_track_stack();
65150 +
65151 /* Look up for the address quickly, lockless.
65152 * Because of a concurrent writer, we might not find an existing entry.
65153 */
65154 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
65155 if (p) {
65156 p->daddr = *daddr;
65157 atomic_set(&p->refcnt, 1);
65158 - atomic_set(&p->rid, 0);
65159 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65160 + atomic_set_unchecked(&p->rid, 0);
65161 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65162 p->tcp_ts_stamp = 0;
65163 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65164 p->rate_tokens = 0;
65165 diff -urNp linux-3.0.3/net/ipv4/ip_fragment.c linux-3.0.3/net/ipv4/ip_fragment.c
65166 --- linux-3.0.3/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65167 +++ linux-3.0.3/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65168 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65169 return 0;
65170
65171 start = qp->rid;
65172 - end = atomic_inc_return(&peer->rid);
65173 + end = atomic_inc_return_unchecked(&peer->rid);
65174 qp->rid = end;
65175
65176 rc = qp->q.fragments && (end - start) > max;
65177 diff -urNp linux-3.0.3/net/ipv4/ip_sockglue.c linux-3.0.3/net/ipv4/ip_sockglue.c
65178 --- linux-3.0.3/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65179 +++ linux-3.0.3/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65180 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65181 int val;
65182 int len;
65183
65184 + pax_track_stack();
65185 +
65186 if (level != SOL_IP)
65187 return -EOPNOTSUPP;
65188
65189 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65190 len = min_t(unsigned int, len, opt->optlen);
65191 if (put_user(len, optlen))
65192 return -EFAULT;
65193 - if (copy_to_user(optval, opt->__data, len))
65194 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65195 + copy_to_user(optval, opt->__data, len))
65196 return -EFAULT;
65197 return 0;
65198 }
65199 diff -urNp linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
65200 --- linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65201 +++ linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65202 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65203
65204 *len = 0;
65205
65206 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65207 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65208 if (*octets == NULL) {
65209 if (net_ratelimit())
65210 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65211 diff -urNp linux-3.0.3/net/ipv4/ping.c linux-3.0.3/net/ipv4/ping.c
65212 --- linux-3.0.3/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65213 +++ linux-3.0.3/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65214 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65215 sk_rmem_alloc_get(sp),
65216 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65217 atomic_read(&sp->sk_refcnt), sp,
65218 - atomic_read(&sp->sk_drops), len);
65219 + atomic_read_unchecked(&sp->sk_drops), len);
65220 }
65221
65222 static int ping_seq_show(struct seq_file *seq, void *v)
65223 diff -urNp linux-3.0.3/net/ipv4/raw.c linux-3.0.3/net/ipv4/raw.c
65224 --- linux-3.0.3/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65225 +++ linux-3.0.3/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65226 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65227 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65228 {
65229 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65230 - atomic_inc(&sk->sk_drops);
65231 + atomic_inc_unchecked(&sk->sk_drops);
65232 kfree_skb(skb);
65233 return NET_RX_DROP;
65234 }
65235 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65236
65237 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65238 {
65239 + struct icmp_filter filter;
65240 +
65241 if (optlen > sizeof(struct icmp_filter))
65242 optlen = sizeof(struct icmp_filter);
65243 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65244 + if (copy_from_user(&filter, optval, optlen))
65245 return -EFAULT;
65246 + raw_sk(sk)->filter = filter;
65247 return 0;
65248 }
65249
65250 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65251 {
65252 int len, ret = -EFAULT;
65253 + struct icmp_filter filter;
65254
65255 if (get_user(len, optlen))
65256 goto out;
65257 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65258 if (len > sizeof(struct icmp_filter))
65259 len = sizeof(struct icmp_filter);
65260 ret = -EFAULT;
65261 - if (put_user(len, optlen) ||
65262 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65263 + filter = raw_sk(sk)->filter;
65264 + if (put_user(len, optlen) || len > sizeof filter ||
65265 + copy_to_user(optval, &filter, len))
65266 goto out;
65267 ret = 0;
65268 out: return ret;
65269 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65270 sk_wmem_alloc_get(sp),
65271 sk_rmem_alloc_get(sp),
65272 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65273 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65274 + atomic_read(&sp->sk_refcnt),
65275 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65276 + NULL,
65277 +#else
65278 + sp,
65279 +#endif
65280 + atomic_read_unchecked(&sp->sk_drops));
65281 }
65282
65283 static int raw_seq_show(struct seq_file *seq, void *v)
65284 diff -urNp linux-3.0.3/net/ipv4/route.c linux-3.0.3/net/ipv4/route.c
65285 --- linux-3.0.3/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65286 +++ linux-3.0.3/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65287 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65288
65289 static inline int rt_genid(struct net *net)
65290 {
65291 - return atomic_read(&net->ipv4.rt_genid);
65292 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65293 }
65294
65295 #ifdef CONFIG_PROC_FS
65296 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65297 unsigned char shuffle;
65298
65299 get_random_bytes(&shuffle, sizeof(shuffle));
65300 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65301 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65302 }
65303
65304 /*
65305 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65306 error = rt->dst.error;
65307 if (peer) {
65308 inet_peer_refcheck(rt->peer);
65309 - id = atomic_read(&peer->ip_id_count) & 0xffff;
65310 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65311 if (peer->tcp_ts_stamp) {
65312 ts = peer->tcp_ts;
65313 tsage = get_seconds() - peer->tcp_ts_stamp;
65314 diff -urNp linux-3.0.3/net/ipv4/tcp.c linux-3.0.3/net/ipv4/tcp.c
65315 --- linux-3.0.3/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65316 +++ linux-3.0.3/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65317 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65318 int val;
65319 int err = 0;
65320
65321 + pax_track_stack();
65322 +
65323 /* These are data/string values, all the others are ints */
65324 switch (optname) {
65325 case TCP_CONGESTION: {
65326 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65327 struct tcp_sock *tp = tcp_sk(sk);
65328 int val, len;
65329
65330 + pax_track_stack();
65331 +
65332 if (get_user(len, optlen))
65333 return -EFAULT;
65334
65335 diff -urNp linux-3.0.3/net/ipv4/tcp_ipv4.c linux-3.0.3/net/ipv4/tcp_ipv4.c
65336 --- linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65337 +++ linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65338 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65339 int sysctl_tcp_low_latency __read_mostly;
65340 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65341
65342 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65343 +extern int grsec_enable_blackhole;
65344 +#endif
65345
65346 #ifdef CONFIG_TCP_MD5SIG
65347 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65348 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65349 return 0;
65350
65351 reset:
65352 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65353 + if (!grsec_enable_blackhole)
65354 +#endif
65355 tcp_v4_send_reset(rsk, skb);
65356 discard:
65357 kfree_skb(skb);
65358 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65359 TCP_SKB_CB(skb)->sacked = 0;
65360
65361 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65362 - if (!sk)
65363 + if (!sk) {
65364 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65365 + ret = 1;
65366 +#endif
65367 goto no_tcp_socket;
65368 -
65369 + }
65370 process:
65371 - if (sk->sk_state == TCP_TIME_WAIT)
65372 + if (sk->sk_state == TCP_TIME_WAIT) {
65373 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65374 + ret = 2;
65375 +#endif
65376 goto do_time_wait;
65377 + }
65378
65379 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65380 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65381 @@ -1724,6 +1737,10 @@ no_tcp_socket:
65382 bad_packet:
65383 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65384 } else {
65385 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65386 + if (!grsec_enable_blackhole || (ret == 1 &&
65387 + (skb->dev->flags & IFF_LOOPBACK)))
65388 +#endif
65389 tcp_v4_send_reset(NULL, skb);
65390 }
65391
65392 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65393 0, /* non standard timer */
65394 0, /* open_requests have no inode */
65395 atomic_read(&sk->sk_refcnt),
65396 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65397 + NULL,
65398 +#else
65399 req,
65400 +#endif
65401 len);
65402 }
65403
65404 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65405 sock_i_uid(sk),
65406 icsk->icsk_probes_out,
65407 sock_i_ino(sk),
65408 - atomic_read(&sk->sk_refcnt), sk,
65409 + atomic_read(&sk->sk_refcnt),
65410 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65411 + NULL,
65412 +#else
65413 + sk,
65414 +#endif
65415 jiffies_to_clock_t(icsk->icsk_rto),
65416 jiffies_to_clock_t(icsk->icsk_ack.ato),
65417 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65418 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65419 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65420 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65421 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65422 - atomic_read(&tw->tw_refcnt), tw, len);
65423 + atomic_read(&tw->tw_refcnt),
65424 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65425 + NULL,
65426 +#else
65427 + tw,
65428 +#endif
65429 + len);
65430 }
65431
65432 #define TMPSZ 150
65433 diff -urNp linux-3.0.3/net/ipv4/tcp_minisocks.c linux-3.0.3/net/ipv4/tcp_minisocks.c
65434 --- linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65435 +++ linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65436 @@ -27,6 +27,10 @@
65437 #include <net/inet_common.h>
65438 #include <net/xfrm.h>
65439
65440 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65441 +extern int grsec_enable_blackhole;
65442 +#endif
65443 +
65444 int sysctl_tcp_syncookies __read_mostly = 1;
65445 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65446
65447 @@ -745,6 +749,10 @@ listen_overflow:
65448
65449 embryonic_reset:
65450 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65451 +
65452 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65453 + if (!grsec_enable_blackhole)
65454 +#endif
65455 if (!(flg & TCP_FLAG_RST))
65456 req->rsk_ops->send_reset(sk, skb);
65457
65458 diff -urNp linux-3.0.3/net/ipv4/tcp_output.c linux-3.0.3/net/ipv4/tcp_output.c
65459 --- linux-3.0.3/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65460 +++ linux-3.0.3/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65461 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65462 int mss;
65463 int s_data_desired = 0;
65464
65465 + pax_track_stack();
65466 +
65467 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65468 s_data_desired = cvp->s_data_desired;
65469 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65470 diff -urNp linux-3.0.3/net/ipv4/tcp_probe.c linux-3.0.3/net/ipv4/tcp_probe.c
65471 --- linux-3.0.3/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65472 +++ linux-3.0.3/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65473 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65474 if (cnt + width >= len)
65475 break;
65476
65477 - if (copy_to_user(buf + cnt, tbuf, width))
65478 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65479 return -EFAULT;
65480 cnt += width;
65481 }
65482 diff -urNp linux-3.0.3/net/ipv4/tcp_timer.c linux-3.0.3/net/ipv4/tcp_timer.c
65483 --- linux-3.0.3/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65484 +++ linux-3.0.3/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65485 @@ -22,6 +22,10 @@
65486 #include <linux/gfp.h>
65487 #include <net/tcp.h>
65488
65489 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65490 +extern int grsec_lastack_retries;
65491 +#endif
65492 +
65493 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65494 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65495 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65496 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65497 }
65498 }
65499
65500 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65501 + if ((sk->sk_state == TCP_LAST_ACK) &&
65502 + (grsec_lastack_retries > 0) &&
65503 + (grsec_lastack_retries < retry_until))
65504 + retry_until = grsec_lastack_retries;
65505 +#endif
65506 +
65507 if (retransmits_timed_out(sk, retry_until,
65508 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65509 /* Has it gone just too far? */
65510 diff -urNp linux-3.0.3/net/ipv4/udp.c linux-3.0.3/net/ipv4/udp.c
65511 --- linux-3.0.3/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65512 +++ linux-3.0.3/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65513 @@ -86,6 +86,7 @@
65514 #include <linux/types.h>
65515 #include <linux/fcntl.h>
65516 #include <linux/module.h>
65517 +#include <linux/security.h>
65518 #include <linux/socket.h>
65519 #include <linux/sockios.h>
65520 #include <linux/igmp.h>
65521 @@ -107,6 +108,10 @@
65522 #include <net/xfrm.h>
65523 #include "udp_impl.h"
65524
65525 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65526 +extern int grsec_enable_blackhole;
65527 +#endif
65528 +
65529 struct udp_table udp_table __read_mostly;
65530 EXPORT_SYMBOL(udp_table);
65531
65532 @@ -564,6 +569,9 @@ found:
65533 return s;
65534 }
65535
65536 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65537 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65538 +
65539 /*
65540 * This routine is called by the ICMP module when it gets some
65541 * sort of error condition. If err < 0 then the socket should
65542 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65543 dport = usin->sin_port;
65544 if (dport == 0)
65545 return -EINVAL;
65546 +
65547 + err = gr_search_udp_sendmsg(sk, usin);
65548 + if (err)
65549 + return err;
65550 } else {
65551 if (sk->sk_state != TCP_ESTABLISHED)
65552 return -EDESTADDRREQ;
65553 +
65554 + err = gr_search_udp_sendmsg(sk, NULL);
65555 + if (err)
65556 + return err;
65557 +
65558 daddr = inet->inet_daddr;
65559 dport = inet->inet_dport;
65560 /* Open fast path for connected socket.
65561 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65562 udp_lib_checksum_complete(skb)) {
65563 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65564 IS_UDPLITE(sk));
65565 - atomic_inc(&sk->sk_drops);
65566 + atomic_inc_unchecked(&sk->sk_drops);
65567 __skb_unlink(skb, rcvq);
65568 __skb_queue_tail(&list_kill, skb);
65569 }
65570 @@ -1184,6 +1201,10 @@ try_again:
65571 if (!skb)
65572 goto out;
65573
65574 + err = gr_search_udp_recvmsg(sk, skb);
65575 + if (err)
65576 + goto out_free;
65577 +
65578 ulen = skb->len - sizeof(struct udphdr);
65579 if (len > ulen)
65580 len = ulen;
65581 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65582
65583 drop:
65584 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65585 - atomic_inc(&sk->sk_drops);
65586 + atomic_inc_unchecked(&sk->sk_drops);
65587 kfree_skb(skb);
65588 return -1;
65589 }
65590 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65591 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65592
65593 if (!skb1) {
65594 - atomic_inc(&sk->sk_drops);
65595 + atomic_inc_unchecked(&sk->sk_drops);
65596 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65597 IS_UDPLITE(sk));
65598 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65599 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65600 goto csum_error;
65601
65602 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65603 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65604 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65605 +#endif
65606 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65607
65608 /*
65609 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65610 sk_wmem_alloc_get(sp),
65611 sk_rmem_alloc_get(sp),
65612 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65613 - atomic_read(&sp->sk_refcnt), sp,
65614 - atomic_read(&sp->sk_drops), len);
65615 + atomic_read(&sp->sk_refcnt),
65616 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65617 + NULL,
65618 +#else
65619 + sp,
65620 +#endif
65621 + atomic_read_unchecked(&sp->sk_drops), len);
65622 }
65623
65624 int udp4_seq_show(struct seq_file *seq, void *v)
65625 diff -urNp linux-3.0.3/net/ipv6/inet6_connection_sock.c linux-3.0.3/net/ipv6/inet6_connection_sock.c
65626 --- linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65627 +++ linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65628 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65629 #ifdef CONFIG_XFRM
65630 {
65631 struct rt6_info *rt = (struct rt6_info *)dst;
65632 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65633 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65634 }
65635 #endif
65636 }
65637 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65638 #ifdef CONFIG_XFRM
65639 if (dst) {
65640 struct rt6_info *rt = (struct rt6_info *)dst;
65641 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65642 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65643 __sk_dst_reset(sk);
65644 dst = NULL;
65645 }
65646 diff -urNp linux-3.0.3/net/ipv6/ipv6_sockglue.c linux-3.0.3/net/ipv6/ipv6_sockglue.c
65647 --- linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65648 +++ linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65649 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65650 int val, valbool;
65651 int retv = -ENOPROTOOPT;
65652
65653 + pax_track_stack();
65654 +
65655 if (optval == NULL)
65656 val=0;
65657 else {
65658 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
65659 int len;
65660 int val;
65661
65662 + pax_track_stack();
65663 +
65664 if (ip6_mroute_opt(optname))
65665 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
65666
65667 diff -urNp linux-3.0.3/net/ipv6/raw.c linux-3.0.3/net/ipv6/raw.c
65668 --- linux-3.0.3/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
65669 +++ linux-3.0.3/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
65670 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
65671 {
65672 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
65673 skb_checksum_complete(skb)) {
65674 - atomic_inc(&sk->sk_drops);
65675 + atomic_inc_unchecked(&sk->sk_drops);
65676 kfree_skb(skb);
65677 return NET_RX_DROP;
65678 }
65679 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65680 struct raw6_sock *rp = raw6_sk(sk);
65681
65682 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
65683 - atomic_inc(&sk->sk_drops);
65684 + atomic_inc_unchecked(&sk->sk_drops);
65685 kfree_skb(skb);
65686 return NET_RX_DROP;
65687 }
65688 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65689
65690 if (inet->hdrincl) {
65691 if (skb_checksum_complete(skb)) {
65692 - atomic_inc(&sk->sk_drops);
65693 + atomic_inc_unchecked(&sk->sk_drops);
65694 kfree_skb(skb);
65695 return NET_RX_DROP;
65696 }
65697 @@ -601,7 +601,7 @@ out:
65698 return err;
65699 }
65700
65701 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
65702 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
65703 struct flowi6 *fl6, struct dst_entry **dstp,
65704 unsigned int flags)
65705 {
65706 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
65707 u16 proto;
65708 int err;
65709
65710 + pax_track_stack();
65711 +
65712 /* Rough check on arithmetic overflow,
65713 better check is made in ip6_append_data().
65714 */
65715 @@ -909,12 +911,15 @@ do_confirm:
65716 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
65717 char __user *optval, int optlen)
65718 {
65719 + struct icmp6_filter filter;
65720 +
65721 switch (optname) {
65722 case ICMPV6_FILTER:
65723 if (optlen > sizeof(struct icmp6_filter))
65724 optlen = sizeof(struct icmp6_filter);
65725 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
65726 + if (copy_from_user(&filter, optval, optlen))
65727 return -EFAULT;
65728 + raw6_sk(sk)->filter = filter;
65729 return 0;
65730 default:
65731 return -ENOPROTOOPT;
65732 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
65733 char __user *optval, int __user *optlen)
65734 {
65735 int len;
65736 + struct icmp6_filter filter;
65737
65738 switch (optname) {
65739 case ICMPV6_FILTER:
65740 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
65741 len = sizeof(struct icmp6_filter);
65742 if (put_user(len, optlen))
65743 return -EFAULT;
65744 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
65745 + filter = raw6_sk(sk)->filter;
65746 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
65747 return -EFAULT;
65748 return 0;
65749 default:
65750 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
65751 0, 0L, 0,
65752 sock_i_uid(sp), 0,
65753 sock_i_ino(sp),
65754 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65755 + atomic_read(&sp->sk_refcnt),
65756 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65757 + NULL,
65758 +#else
65759 + sp,
65760 +#endif
65761 + atomic_read_unchecked(&sp->sk_drops));
65762 }
65763
65764 static int raw6_seq_show(struct seq_file *seq, void *v)
65765 diff -urNp linux-3.0.3/net/ipv6/tcp_ipv6.c linux-3.0.3/net/ipv6/tcp_ipv6.c
65766 --- linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
65767 +++ linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
65768 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
65769 }
65770 #endif
65771
65772 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65773 +extern int grsec_enable_blackhole;
65774 +#endif
65775 +
65776 static void tcp_v6_hash(struct sock *sk)
65777 {
65778 if (sk->sk_state != TCP_CLOSE) {
65779 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
65780 return 0;
65781
65782 reset:
65783 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65784 + if (!grsec_enable_blackhole)
65785 +#endif
65786 tcp_v6_send_reset(sk, skb);
65787 discard:
65788 if (opt_skb)
65789 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
65790 TCP_SKB_CB(skb)->sacked = 0;
65791
65792 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65793 - if (!sk)
65794 + if (!sk) {
65795 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65796 + ret = 1;
65797 +#endif
65798 goto no_tcp_socket;
65799 + }
65800
65801 process:
65802 - if (sk->sk_state == TCP_TIME_WAIT)
65803 + if (sk->sk_state == TCP_TIME_WAIT) {
65804 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65805 + ret = 2;
65806 +#endif
65807 goto do_time_wait;
65808 + }
65809
65810 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
65811 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65812 @@ -1794,6 +1809,10 @@ no_tcp_socket:
65813 bad_packet:
65814 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65815 } else {
65816 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65817 + if (!grsec_enable_blackhole || (ret == 1 &&
65818 + (skb->dev->flags & IFF_LOOPBACK)))
65819 +#endif
65820 tcp_v6_send_reset(NULL, skb);
65821 }
65822
65823 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
65824 uid,
65825 0, /* non standard timer */
65826 0, /* open_requests have no inode */
65827 - 0, req);
65828 + 0,
65829 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65830 + NULL
65831 +#else
65832 + req
65833 +#endif
65834 + );
65835 }
65836
65837 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
65838 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
65839 sock_i_uid(sp),
65840 icsk->icsk_probes_out,
65841 sock_i_ino(sp),
65842 - atomic_read(&sp->sk_refcnt), sp,
65843 + atomic_read(&sp->sk_refcnt),
65844 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65845 + NULL,
65846 +#else
65847 + sp,
65848 +#endif
65849 jiffies_to_clock_t(icsk->icsk_rto),
65850 jiffies_to_clock_t(icsk->icsk_ack.ato),
65851 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
65852 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
65853 dest->s6_addr32[2], dest->s6_addr32[3], destp,
65854 tw->tw_substate, 0, 0,
65855 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65856 - atomic_read(&tw->tw_refcnt), tw);
65857 + atomic_read(&tw->tw_refcnt),
65858 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65859 + NULL
65860 +#else
65861 + tw
65862 +#endif
65863 + );
65864 }
65865
65866 static int tcp6_seq_show(struct seq_file *seq, void *v)
65867 diff -urNp linux-3.0.3/net/ipv6/udp.c linux-3.0.3/net/ipv6/udp.c
65868 --- linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
65869 +++ linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
65870 @@ -50,6 +50,10 @@
65871 #include <linux/seq_file.h>
65872 #include "udp_impl.h"
65873
65874 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65875 +extern int grsec_enable_blackhole;
65876 +#endif
65877 +
65878 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
65879 {
65880 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
65881 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
65882
65883 return 0;
65884 drop:
65885 - atomic_inc(&sk->sk_drops);
65886 + atomic_inc_unchecked(&sk->sk_drops);
65887 drop_no_sk_drops_inc:
65888 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65889 kfree_skb(skb);
65890 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
65891 continue;
65892 }
65893 drop:
65894 - atomic_inc(&sk->sk_drops);
65895 + atomic_inc_unchecked(&sk->sk_drops);
65896 UDP6_INC_STATS_BH(sock_net(sk),
65897 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
65898 UDP6_INC_STATS_BH(sock_net(sk),
65899 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
65900 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
65901 proto == IPPROTO_UDPLITE);
65902
65903 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65904 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65905 +#endif
65906 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
65907
65908 kfree_skb(skb);
65909 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
65910 if (!sock_owned_by_user(sk))
65911 udpv6_queue_rcv_skb(sk, skb);
65912 else if (sk_add_backlog(sk, skb)) {
65913 - atomic_inc(&sk->sk_drops);
65914 + atomic_inc_unchecked(&sk->sk_drops);
65915 bh_unlock_sock(sk);
65916 sock_put(sk);
65917 goto discard;
65918 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
65919 0, 0L, 0,
65920 sock_i_uid(sp), 0,
65921 sock_i_ino(sp),
65922 - atomic_read(&sp->sk_refcnt), sp,
65923 - atomic_read(&sp->sk_drops));
65924 + atomic_read(&sp->sk_refcnt),
65925 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65926 + NULL,
65927 +#else
65928 + sp,
65929 +#endif
65930 + atomic_read_unchecked(&sp->sk_drops));
65931 }
65932
65933 int udp6_seq_show(struct seq_file *seq, void *v)
65934 diff -urNp linux-3.0.3/net/irda/ircomm/ircomm_tty.c linux-3.0.3/net/irda/ircomm/ircomm_tty.c
65935 --- linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
65936 +++ linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
65937 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
65938 add_wait_queue(&self->open_wait, &wait);
65939
65940 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
65941 - __FILE__,__LINE__, tty->driver->name, self->open_count );
65942 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
65943
65944 /* As far as I can see, we protect open_count - Jean II */
65945 spin_lock_irqsave(&self->spinlock, flags);
65946 if (!tty_hung_up_p(filp)) {
65947 extra_count = 1;
65948 - self->open_count--;
65949 + local_dec(&self->open_count);
65950 }
65951 spin_unlock_irqrestore(&self->spinlock, flags);
65952 - self->blocked_open++;
65953 + local_inc(&self->blocked_open);
65954
65955 while (1) {
65956 if (tty->termios->c_cflag & CBAUD) {
65957 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
65958 }
65959
65960 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
65961 - __FILE__,__LINE__, tty->driver->name, self->open_count );
65962 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
65963
65964 schedule();
65965 }
65966 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
65967 if (extra_count) {
65968 /* ++ is not atomic, so this should be protected - Jean II */
65969 spin_lock_irqsave(&self->spinlock, flags);
65970 - self->open_count++;
65971 + local_inc(&self->open_count);
65972 spin_unlock_irqrestore(&self->spinlock, flags);
65973 }
65974 - self->blocked_open--;
65975 + local_dec(&self->blocked_open);
65976
65977 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
65978 - __FILE__,__LINE__, tty->driver->name, self->open_count);
65979 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
65980
65981 if (!retval)
65982 self->flags |= ASYNC_NORMAL_ACTIVE;
65983 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
65984 }
65985 /* ++ is not atomic, so this should be protected - Jean II */
65986 spin_lock_irqsave(&self->spinlock, flags);
65987 - self->open_count++;
65988 + local_inc(&self->open_count);
65989
65990 tty->driver_data = self;
65991 self->tty = tty;
65992 spin_unlock_irqrestore(&self->spinlock, flags);
65993
65994 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
65995 - self->line, self->open_count);
65996 + self->line, local_read(&self->open_count));
65997
65998 /* Not really used by us, but lets do it anyway */
65999 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66000 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66001 return;
66002 }
66003
66004 - if ((tty->count == 1) && (self->open_count != 1)) {
66005 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66006 /*
66007 * Uh, oh. tty->count is 1, which means that the tty
66008 * structure will be freed. state->count should always
66009 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66010 */
66011 IRDA_DEBUG(0, "%s(), bad serial port count; "
66012 "tty->count is 1, state->count is %d\n", __func__ ,
66013 - self->open_count);
66014 - self->open_count = 1;
66015 + local_read(&self->open_count));
66016 + local_set(&self->open_count, 1);
66017 }
66018
66019 - if (--self->open_count < 0) {
66020 + if (local_dec_return(&self->open_count) < 0) {
66021 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66022 - __func__, self->line, self->open_count);
66023 - self->open_count = 0;
66024 + __func__, self->line, local_read(&self->open_count));
66025 + local_set(&self->open_count, 0);
66026 }
66027 - if (self->open_count) {
66028 + if (local_read(&self->open_count)) {
66029 spin_unlock_irqrestore(&self->spinlock, flags);
66030
66031 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66032 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66033 tty->closing = 0;
66034 self->tty = NULL;
66035
66036 - if (self->blocked_open) {
66037 + if (local_read(&self->blocked_open)) {
66038 if (self->close_delay)
66039 schedule_timeout_interruptible(self->close_delay);
66040 wake_up_interruptible(&self->open_wait);
66041 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66042 spin_lock_irqsave(&self->spinlock, flags);
66043 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66044 self->tty = NULL;
66045 - self->open_count = 0;
66046 + local_set(&self->open_count, 0);
66047 spin_unlock_irqrestore(&self->spinlock, flags);
66048
66049 wake_up_interruptible(&self->open_wait);
66050 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66051 seq_putc(m, '\n');
66052
66053 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66054 - seq_printf(m, "Open count: %d\n", self->open_count);
66055 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66056 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66057 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66058
66059 diff -urNp linux-3.0.3/net/iucv/af_iucv.c linux-3.0.3/net/iucv/af_iucv.c
66060 --- linux-3.0.3/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66061 +++ linux-3.0.3/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66062 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66063
66064 write_lock_bh(&iucv_sk_list.lock);
66065
66066 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66067 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66068 while (__iucv_get_sock_by_name(name)) {
66069 sprintf(name, "%08x",
66070 - atomic_inc_return(&iucv_sk_list.autobind_name));
66071 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66072 }
66073
66074 write_unlock_bh(&iucv_sk_list.lock);
66075 diff -urNp linux-3.0.3/net/key/af_key.c linux-3.0.3/net/key/af_key.c
66076 --- linux-3.0.3/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66077 +++ linux-3.0.3/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66078 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66079 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66080 struct xfrm_kmaddress k;
66081
66082 + pax_track_stack();
66083 +
66084 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66085 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66086 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66087 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66088 static u32 get_acqseq(void)
66089 {
66090 u32 res;
66091 - static atomic_t acqseq;
66092 + static atomic_unchecked_t acqseq;
66093
66094 do {
66095 - res = atomic_inc_return(&acqseq);
66096 + res = atomic_inc_return_unchecked(&acqseq);
66097 } while (!res);
66098 return res;
66099 }
66100 diff -urNp linux-3.0.3/net/lapb/lapb_iface.c linux-3.0.3/net/lapb/lapb_iface.c
66101 --- linux-3.0.3/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66102 +++ linux-3.0.3/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66103 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66104 goto out;
66105
66106 lapb->dev = dev;
66107 - lapb->callbacks = *callbacks;
66108 + lapb->callbacks = callbacks;
66109
66110 __lapb_insert_cb(lapb);
66111
66112 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66113
66114 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66115 {
66116 - if (lapb->callbacks.connect_confirmation)
66117 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66118 + if (lapb->callbacks->connect_confirmation)
66119 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66120 }
66121
66122 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66123 {
66124 - if (lapb->callbacks.connect_indication)
66125 - lapb->callbacks.connect_indication(lapb->dev, reason);
66126 + if (lapb->callbacks->connect_indication)
66127 + lapb->callbacks->connect_indication(lapb->dev, reason);
66128 }
66129
66130 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66131 {
66132 - if (lapb->callbacks.disconnect_confirmation)
66133 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66134 + if (lapb->callbacks->disconnect_confirmation)
66135 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66136 }
66137
66138 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66139 {
66140 - if (lapb->callbacks.disconnect_indication)
66141 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66142 + if (lapb->callbacks->disconnect_indication)
66143 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66144 }
66145
66146 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66147 {
66148 - if (lapb->callbacks.data_indication)
66149 - return lapb->callbacks.data_indication(lapb->dev, skb);
66150 + if (lapb->callbacks->data_indication)
66151 + return lapb->callbacks->data_indication(lapb->dev, skb);
66152
66153 kfree_skb(skb);
66154 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66155 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66156 {
66157 int used = 0;
66158
66159 - if (lapb->callbacks.data_transmit) {
66160 - lapb->callbacks.data_transmit(lapb->dev, skb);
66161 + if (lapb->callbacks->data_transmit) {
66162 + lapb->callbacks->data_transmit(lapb->dev, skb);
66163 used = 1;
66164 }
66165
66166 diff -urNp linux-3.0.3/net/mac80211/debugfs_sta.c linux-3.0.3/net/mac80211/debugfs_sta.c
66167 --- linux-3.0.3/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66168 +++ linux-3.0.3/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66169 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66170 struct tid_ampdu_rx *tid_rx;
66171 struct tid_ampdu_tx *tid_tx;
66172
66173 + pax_track_stack();
66174 +
66175 rcu_read_lock();
66176
66177 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66178 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66179 struct sta_info *sta = file->private_data;
66180 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66181
66182 + pax_track_stack();
66183 +
66184 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66185 htc->ht_supported ? "" : "not ");
66186 if (htc->ht_supported) {
66187 diff -urNp linux-3.0.3/net/mac80211/ieee80211_i.h linux-3.0.3/net/mac80211/ieee80211_i.h
66188 --- linux-3.0.3/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66189 +++ linux-3.0.3/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66190 @@ -27,6 +27,7 @@
66191 #include <net/ieee80211_radiotap.h>
66192 #include <net/cfg80211.h>
66193 #include <net/mac80211.h>
66194 +#include <asm/local.h>
66195 #include "key.h"
66196 #include "sta_info.h"
66197
66198 @@ -721,7 +722,7 @@ struct ieee80211_local {
66199 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66200 spinlock_t queue_stop_reason_lock;
66201
66202 - int open_count;
66203 + local_t open_count;
66204 int monitors, cooked_mntrs;
66205 /* number of interfaces with corresponding FIF_ flags */
66206 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66207 diff -urNp linux-3.0.3/net/mac80211/iface.c linux-3.0.3/net/mac80211/iface.c
66208 --- linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66209 +++ linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66210 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66211 break;
66212 }
66213
66214 - if (local->open_count == 0) {
66215 + if (local_read(&local->open_count) == 0) {
66216 res = drv_start(local);
66217 if (res)
66218 goto err_del_bss;
66219 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66220 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66221
66222 if (!is_valid_ether_addr(dev->dev_addr)) {
66223 - if (!local->open_count)
66224 + if (!local_read(&local->open_count))
66225 drv_stop(local);
66226 return -EADDRNOTAVAIL;
66227 }
66228 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66229 mutex_unlock(&local->mtx);
66230
66231 if (coming_up)
66232 - local->open_count++;
66233 + local_inc(&local->open_count);
66234
66235 if (hw_reconf_flags) {
66236 ieee80211_hw_config(local, hw_reconf_flags);
66237 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66238 err_del_interface:
66239 drv_remove_interface(local, &sdata->vif);
66240 err_stop:
66241 - if (!local->open_count)
66242 + if (!local_read(&local->open_count))
66243 drv_stop(local);
66244 err_del_bss:
66245 sdata->bss = NULL;
66246 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66247 }
66248
66249 if (going_down)
66250 - local->open_count--;
66251 + local_dec(&local->open_count);
66252
66253 switch (sdata->vif.type) {
66254 case NL80211_IFTYPE_AP_VLAN:
66255 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66256
66257 ieee80211_recalc_ps(local, -1);
66258
66259 - if (local->open_count == 0) {
66260 + if (local_read(&local->open_count) == 0) {
66261 if (local->ops->napi_poll)
66262 napi_disable(&local->napi);
66263 ieee80211_clear_tx_pending(local);
66264 diff -urNp linux-3.0.3/net/mac80211/main.c linux-3.0.3/net/mac80211/main.c
66265 --- linux-3.0.3/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66266 +++ linux-3.0.3/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66267 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66268 local->hw.conf.power_level = power;
66269 }
66270
66271 - if (changed && local->open_count) {
66272 + if (changed && local_read(&local->open_count)) {
66273 ret = drv_config(local, changed);
66274 /*
66275 * Goal:
66276 diff -urNp linux-3.0.3/net/mac80211/mlme.c linux-3.0.3/net/mac80211/mlme.c
66277 --- linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66278 +++ linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66279 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66280 bool have_higher_than_11mbit = false;
66281 u16 ap_ht_cap_flags;
66282
66283 + pax_track_stack();
66284 +
66285 /* AssocResp and ReassocResp have identical structure */
66286
66287 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66288 diff -urNp linux-3.0.3/net/mac80211/pm.c linux-3.0.3/net/mac80211/pm.c
66289 --- linux-3.0.3/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66290 +++ linux-3.0.3/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66291 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66292 cancel_work_sync(&local->dynamic_ps_enable_work);
66293 del_timer_sync(&local->dynamic_ps_timer);
66294
66295 - local->wowlan = wowlan && local->open_count;
66296 + local->wowlan = wowlan && local_read(&local->open_count);
66297 if (local->wowlan) {
66298 int err = drv_suspend(local, wowlan);
66299 if (err) {
66300 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66301 }
66302
66303 /* stop hardware - this must stop RX */
66304 - if (local->open_count)
66305 + if (local_read(&local->open_count))
66306 ieee80211_stop_device(local);
66307
66308 suspend:
66309 diff -urNp linux-3.0.3/net/mac80211/rate.c linux-3.0.3/net/mac80211/rate.c
66310 --- linux-3.0.3/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66311 +++ linux-3.0.3/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66312 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66313
66314 ASSERT_RTNL();
66315
66316 - if (local->open_count)
66317 + if (local_read(&local->open_count))
66318 return -EBUSY;
66319
66320 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66321 diff -urNp linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c
66322 --- linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66323 +++ linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66324 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66325
66326 spin_unlock_irqrestore(&events->lock, status);
66327
66328 - if (copy_to_user(buf, pb, p))
66329 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66330 return -EFAULT;
66331
66332 return p;
66333 diff -urNp linux-3.0.3/net/mac80211/util.c linux-3.0.3/net/mac80211/util.c
66334 --- linux-3.0.3/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66335 +++ linux-3.0.3/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66336 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66337 #endif
66338
66339 /* restart hardware */
66340 - if (local->open_count) {
66341 + if (local_read(&local->open_count)) {
66342 /*
66343 * Upon resume hardware can sometimes be goofy due to
66344 * various platform / driver / bus issues, so restarting
66345 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c
66346 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66347 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66348 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66349 /* Increase the refcnt counter of the dest */
66350 atomic_inc(&dest->refcnt);
66351
66352 - conn_flags = atomic_read(&dest->conn_flags);
66353 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
66354 if (cp->protocol != IPPROTO_UDP)
66355 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66356 /* Bind with the destination and its corresponding transmitter */
66357 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66358 atomic_set(&cp->refcnt, 1);
66359
66360 atomic_set(&cp->n_control, 0);
66361 - atomic_set(&cp->in_pkts, 0);
66362 + atomic_set_unchecked(&cp->in_pkts, 0);
66363
66364 atomic_inc(&ipvs->conn_count);
66365 if (flags & IP_VS_CONN_F_NO_CPORT)
66366 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66367
66368 /* Don't drop the entry if its number of incoming packets is not
66369 located in [0, 8] */
66370 - i = atomic_read(&cp->in_pkts);
66371 + i = atomic_read_unchecked(&cp->in_pkts);
66372 if (i > 8 || i < 0) return 0;
66373
66374 if (!todrop_rate[i]) return 0;
66375 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c
66376 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66377 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66378 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66379 ret = cp->packet_xmit(skb, cp, pd->pp);
66380 /* do not touch skb anymore */
66381
66382 - atomic_inc(&cp->in_pkts);
66383 + atomic_inc_unchecked(&cp->in_pkts);
66384 ip_vs_conn_put(cp);
66385 return ret;
66386 }
66387 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66388 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66389 pkts = sysctl_sync_threshold(ipvs);
66390 else
66391 - pkts = atomic_add_return(1, &cp->in_pkts);
66392 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66393
66394 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66395 cp->protocol == IPPROTO_SCTP) {
66396 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c
66397 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66398 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66399 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66400 ip_vs_rs_hash(ipvs, dest);
66401 write_unlock_bh(&ipvs->rs_lock);
66402 }
66403 - atomic_set(&dest->conn_flags, conn_flags);
66404 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
66405
66406 /* bind the service */
66407 if (!dest->svc) {
66408 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66409 " %-7s %-6d %-10d %-10d\n",
66410 &dest->addr.in6,
66411 ntohs(dest->port),
66412 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66413 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66414 atomic_read(&dest->weight),
66415 atomic_read(&dest->activeconns),
66416 atomic_read(&dest->inactconns));
66417 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66418 "%-7s %-6d %-10d %-10d\n",
66419 ntohl(dest->addr.ip),
66420 ntohs(dest->port),
66421 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66422 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66423 atomic_read(&dest->weight),
66424 atomic_read(&dest->activeconns),
66425 atomic_read(&dest->inactconns));
66426 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66427 struct ip_vs_dest_user *udest_compat;
66428 struct ip_vs_dest_user_kern udest;
66429
66430 + pax_track_stack();
66431 +
66432 if (!capable(CAP_NET_ADMIN))
66433 return -EPERM;
66434
66435 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66436
66437 entry.addr = dest->addr.ip;
66438 entry.port = dest->port;
66439 - entry.conn_flags = atomic_read(&dest->conn_flags);
66440 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66441 entry.weight = atomic_read(&dest->weight);
66442 entry.u_threshold = dest->u_threshold;
66443 entry.l_threshold = dest->l_threshold;
66444 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66445 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66446
66447 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66448 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66449 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66450 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66451 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66452 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66453 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c
66454 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66455 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66456 @@ -648,7 +648,7 @@ control:
66457 * i.e only increment in_pkts for Templates.
66458 */
66459 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66460 - int pkts = atomic_add_return(1, &cp->in_pkts);
66461 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66462
66463 if (pkts % sysctl_sync_period(ipvs) != 1)
66464 return;
66465 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66466
66467 if (opt)
66468 memcpy(&cp->in_seq, opt, sizeof(*opt));
66469 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66470 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66471 cp->state = state;
66472 cp->old_state = cp->state;
66473 /*
66474 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c
66475 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66476 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66477 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66478 else
66479 rc = NF_ACCEPT;
66480 /* do not touch skb anymore */
66481 - atomic_inc(&cp->in_pkts);
66482 + atomic_inc_unchecked(&cp->in_pkts);
66483 goto out;
66484 }
66485
66486 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66487 else
66488 rc = NF_ACCEPT;
66489 /* do not touch skb anymore */
66490 - atomic_inc(&cp->in_pkts);
66491 + atomic_inc_unchecked(&cp->in_pkts);
66492 goto out;
66493 }
66494
66495 diff -urNp linux-3.0.3/net/netfilter/Kconfig linux-3.0.3/net/netfilter/Kconfig
66496 --- linux-3.0.3/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66497 +++ linux-3.0.3/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66498 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66499
66500 To compile it as a module, choose M here. If unsure, say N.
66501
66502 +config NETFILTER_XT_MATCH_GRADM
66503 + tristate '"gradm" match support'
66504 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66505 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66506 + ---help---
66507 + The gradm match allows to match on grsecurity RBAC being enabled.
66508 + It is useful when iptables rules are applied early on bootup to
66509 + prevent connections to the machine (except from a trusted host)
66510 + while the RBAC system is disabled.
66511 +
66512 config NETFILTER_XT_MATCH_HASHLIMIT
66513 tristate '"hashlimit" match support'
66514 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66515 diff -urNp linux-3.0.3/net/netfilter/Makefile linux-3.0.3/net/netfilter/Makefile
66516 --- linux-3.0.3/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66517 +++ linux-3.0.3/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66518 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66519 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66520 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66521 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66522 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66523 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66524 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66525 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66526 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_log.c linux-3.0.3/net/netfilter/nfnetlink_log.c
66527 --- linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66528 +++ linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66529 @@ -70,7 +70,7 @@ struct nfulnl_instance {
66530 };
66531
66532 static DEFINE_SPINLOCK(instances_lock);
66533 -static atomic_t global_seq;
66534 +static atomic_unchecked_t global_seq;
66535
66536 #define INSTANCE_BUCKETS 16
66537 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66538 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66539 /* global sequence number */
66540 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66541 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66542 - htonl(atomic_inc_return(&global_seq)));
66543 + htonl(atomic_inc_return_unchecked(&global_seq)));
66544
66545 if (data_len) {
66546 struct nlattr *nla;
66547 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_queue.c linux-3.0.3/net/netfilter/nfnetlink_queue.c
66548 --- linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66549 +++ linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66550 @@ -58,7 +58,7 @@ struct nfqnl_instance {
66551 */
66552 spinlock_t lock;
66553 unsigned int queue_total;
66554 - atomic_t id_sequence; /* 'sequence' of pkt ids */
66555 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66556 struct list_head queue_list; /* packets in queue */
66557 };
66558
66559 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66560 nfmsg->version = NFNETLINK_V0;
66561 nfmsg->res_id = htons(queue->queue_num);
66562
66563 - entry->id = atomic_inc_return(&queue->id_sequence);
66564 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66565 pmsg.packet_id = htonl(entry->id);
66566 pmsg.hw_protocol = entskb->protocol;
66567 pmsg.hook = entry->hook;
66568 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66569 inst->peer_pid, inst->queue_total,
66570 inst->copy_mode, inst->copy_range,
66571 inst->queue_dropped, inst->queue_user_dropped,
66572 - atomic_read(&inst->id_sequence), 1);
66573 + atomic_read_unchecked(&inst->id_sequence), 1);
66574 }
66575
66576 static const struct seq_operations nfqnl_seq_ops = {
66577 diff -urNp linux-3.0.3/net/netfilter/xt_gradm.c linux-3.0.3/net/netfilter/xt_gradm.c
66578 --- linux-3.0.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66579 +++ linux-3.0.3/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66580 @@ -0,0 +1,51 @@
66581 +/*
66582 + * gradm match for netfilter
66583 + * Copyright © Zbigniew Krzystolik, 2010
66584 + *
66585 + * This program is free software; you can redistribute it and/or modify
66586 + * it under the terms of the GNU General Public License; either version
66587 + * 2 or 3 as published by the Free Software Foundation.
66588 + */
66589 +#include <linux/module.h>
66590 +#include <linux/moduleparam.h>
66591 +#include <linux/skbuff.h>
66592 +#include <linux/netfilter/x_tables.h>
66593 +#include <linux/grsecurity.h>
66594 +#include <linux/netfilter/xt_gradm.h>
66595 +
66596 +static bool
66597 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
66598 +{
66599 + const struct xt_gradm_mtinfo *info = par->matchinfo;
66600 + bool retval = false;
66601 + if (gr_acl_is_enabled())
66602 + retval = true;
66603 + return retval ^ info->invflags;
66604 +}
66605 +
66606 +static struct xt_match gradm_mt_reg __read_mostly = {
66607 + .name = "gradm",
66608 + .revision = 0,
66609 + .family = NFPROTO_UNSPEC,
66610 + .match = gradm_mt,
66611 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
66612 + .me = THIS_MODULE,
66613 +};
66614 +
66615 +static int __init gradm_mt_init(void)
66616 +{
66617 + return xt_register_match(&gradm_mt_reg);
66618 +}
66619 +
66620 +static void __exit gradm_mt_exit(void)
66621 +{
66622 + xt_unregister_match(&gradm_mt_reg);
66623 +}
66624 +
66625 +module_init(gradm_mt_init);
66626 +module_exit(gradm_mt_exit);
66627 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
66628 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
66629 +MODULE_LICENSE("GPL");
66630 +MODULE_ALIAS("ipt_gradm");
66631 +MODULE_ALIAS("ip6t_gradm");
66632 diff -urNp linux-3.0.3/net/netfilter/xt_statistic.c linux-3.0.3/net/netfilter/xt_statistic.c
66633 --- linux-3.0.3/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
66634 +++ linux-3.0.3/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
66635 @@ -18,7 +18,7 @@
66636 #include <linux/netfilter/x_tables.h>
66637
66638 struct xt_statistic_priv {
66639 - atomic_t count;
66640 + atomic_unchecked_t count;
66641 } ____cacheline_aligned_in_smp;
66642
66643 MODULE_LICENSE("GPL");
66644 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
66645 break;
66646 case XT_STATISTIC_MODE_NTH:
66647 do {
66648 - oval = atomic_read(&info->master->count);
66649 + oval = atomic_read_unchecked(&info->master->count);
66650 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
66651 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
66652 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
66653 if (nval == 0)
66654 ret = !ret;
66655 break;
66656 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
66657 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
66658 if (info->master == NULL)
66659 return -ENOMEM;
66660 - atomic_set(&info->master->count, info->u.nth.count);
66661 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
66662
66663 return 0;
66664 }
66665 diff -urNp linux-3.0.3/net/netlink/af_netlink.c linux-3.0.3/net/netlink/af_netlink.c
66666 --- linux-3.0.3/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
66667 +++ linux-3.0.3/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
66668 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
66669 sk->sk_error_report(sk);
66670 }
66671 }
66672 - atomic_inc(&sk->sk_drops);
66673 + atomic_inc_unchecked(&sk->sk_drops);
66674 }
66675
66676 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
66677 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
66678 sk_wmem_alloc_get(s),
66679 nlk->cb,
66680 atomic_read(&s->sk_refcnt),
66681 - atomic_read(&s->sk_drops),
66682 + atomic_read_unchecked(&s->sk_drops),
66683 sock_i_ino(s)
66684 );
66685
66686 diff -urNp linux-3.0.3/net/netrom/af_netrom.c linux-3.0.3/net/netrom/af_netrom.c
66687 --- linux-3.0.3/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
66688 +++ linux-3.0.3/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
66689 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
66690 struct sock *sk = sock->sk;
66691 struct nr_sock *nr = nr_sk(sk);
66692
66693 + memset(sax, 0, sizeof(*sax));
66694 lock_sock(sk);
66695 if (peer != 0) {
66696 if (sk->sk_state != TCP_ESTABLISHED) {
66697 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
66698 *uaddr_len = sizeof(struct full_sockaddr_ax25);
66699 } else {
66700 sax->fsa_ax25.sax25_family = AF_NETROM;
66701 - sax->fsa_ax25.sax25_ndigis = 0;
66702 sax->fsa_ax25.sax25_call = nr->source_addr;
66703 *uaddr_len = sizeof(struct sockaddr_ax25);
66704 }
66705 diff -urNp linux-3.0.3/net/packet/af_packet.c linux-3.0.3/net/packet/af_packet.c
66706 --- linux-3.0.3/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
66707 +++ linux-3.0.3/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
66708 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
66709
66710 spin_lock(&sk->sk_receive_queue.lock);
66711 po->stats.tp_packets++;
66712 - skb->dropcount = atomic_read(&sk->sk_drops);
66713 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
66714 __skb_queue_tail(&sk->sk_receive_queue, skb);
66715 spin_unlock(&sk->sk_receive_queue.lock);
66716 sk->sk_data_ready(sk, skb->len);
66717 return 0;
66718
66719 drop_n_acct:
66720 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
66721 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
66722
66723 drop_n_restore:
66724 if (skb_head != skb->data && skb_shared(skb)) {
66725 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
66726 case PACKET_HDRLEN:
66727 if (len > sizeof(int))
66728 len = sizeof(int);
66729 - if (copy_from_user(&val, optval, len))
66730 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
66731 return -EFAULT;
66732 switch (val) {
66733 case TPACKET_V1:
66734 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
66735
66736 if (put_user(len, optlen))
66737 return -EFAULT;
66738 - if (copy_to_user(optval, data, len))
66739 + if (len > sizeof(st) || copy_to_user(optval, data, len))
66740 return -EFAULT;
66741 return 0;
66742 }
66743 diff -urNp linux-3.0.3/net/phonet/af_phonet.c linux-3.0.3/net/phonet/af_phonet.c
66744 --- linux-3.0.3/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
66745 +++ linux-3.0.3/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
66746 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
66747 {
66748 struct phonet_protocol *pp;
66749
66750 - if (protocol >= PHONET_NPROTO)
66751 + if (protocol < 0 || protocol >= PHONET_NPROTO)
66752 return NULL;
66753
66754 rcu_read_lock();
66755 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
66756 {
66757 int err = 0;
66758
66759 - if (protocol >= PHONET_NPROTO)
66760 + if (protocol < 0 || protocol >= PHONET_NPROTO)
66761 return -EINVAL;
66762
66763 err = proto_register(pp->prot, 1);
66764 diff -urNp linux-3.0.3/net/phonet/pep.c linux-3.0.3/net/phonet/pep.c
66765 --- linux-3.0.3/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
66766 +++ linux-3.0.3/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
66767 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
66768
66769 case PNS_PEP_CTRL_REQ:
66770 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
66771 - atomic_inc(&sk->sk_drops);
66772 + atomic_inc_unchecked(&sk->sk_drops);
66773 break;
66774 }
66775 __skb_pull(skb, 4);
66776 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
66777 }
66778
66779 if (pn->rx_credits == 0) {
66780 - atomic_inc(&sk->sk_drops);
66781 + atomic_inc_unchecked(&sk->sk_drops);
66782 err = -ENOBUFS;
66783 break;
66784 }
66785 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
66786 }
66787
66788 if (pn->rx_credits == 0) {
66789 - atomic_inc(&sk->sk_drops);
66790 + atomic_inc_unchecked(&sk->sk_drops);
66791 err = NET_RX_DROP;
66792 break;
66793 }
66794 diff -urNp linux-3.0.3/net/phonet/socket.c linux-3.0.3/net/phonet/socket.c
66795 --- linux-3.0.3/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
66796 +++ linux-3.0.3/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
66797 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
66798 pn->resource, sk->sk_state,
66799 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
66800 sock_i_uid(sk), sock_i_ino(sk),
66801 - atomic_read(&sk->sk_refcnt), sk,
66802 - atomic_read(&sk->sk_drops), &len);
66803 + atomic_read(&sk->sk_refcnt),
66804 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66805 + NULL,
66806 +#else
66807 + sk,
66808 +#endif
66809 + atomic_read_unchecked(&sk->sk_drops), &len);
66810 }
66811 seq_printf(seq, "%*s\n", 127 - len, "");
66812 return 0;
66813 diff -urNp linux-3.0.3/net/rds/cong.c linux-3.0.3/net/rds/cong.c
66814 --- linux-3.0.3/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
66815 +++ linux-3.0.3/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
66816 @@ -77,7 +77,7 @@
66817 * finds that the saved generation number is smaller than the global generation
66818 * number, it wakes up the process.
66819 */
66820 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
66821 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
66822
66823 /*
66824 * Congestion monitoring
66825 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
66826 rdsdebug("waking map %p for %pI4\n",
66827 map, &map->m_addr);
66828 rds_stats_inc(s_cong_update_received);
66829 - atomic_inc(&rds_cong_generation);
66830 + atomic_inc_unchecked(&rds_cong_generation);
66831 if (waitqueue_active(&map->m_waitq))
66832 wake_up(&map->m_waitq);
66833 if (waitqueue_active(&rds_poll_waitq))
66834 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
66835
66836 int rds_cong_updated_since(unsigned long *recent)
66837 {
66838 - unsigned long gen = atomic_read(&rds_cong_generation);
66839 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
66840
66841 if (likely(*recent == gen))
66842 return 0;
66843 diff -urNp linux-3.0.3/net/rds/ib_cm.c linux-3.0.3/net/rds/ib_cm.c
66844 --- linux-3.0.3/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
66845 +++ linux-3.0.3/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
66846 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
66847 /* Clear the ACK state */
66848 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
66849 #ifdef KERNEL_HAS_ATOMIC64
66850 - atomic64_set(&ic->i_ack_next, 0);
66851 + atomic64_set_unchecked(&ic->i_ack_next, 0);
66852 #else
66853 ic->i_ack_next = 0;
66854 #endif
66855 diff -urNp linux-3.0.3/net/rds/ib.h linux-3.0.3/net/rds/ib.h
66856 --- linux-3.0.3/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
66857 +++ linux-3.0.3/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
66858 @@ -127,7 +127,7 @@ struct rds_ib_connection {
66859 /* sending acks */
66860 unsigned long i_ack_flags;
66861 #ifdef KERNEL_HAS_ATOMIC64
66862 - atomic64_t i_ack_next; /* next ACK to send */
66863 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
66864 #else
66865 spinlock_t i_ack_lock; /* protect i_ack_next */
66866 u64 i_ack_next; /* next ACK to send */
66867 diff -urNp linux-3.0.3/net/rds/ib_recv.c linux-3.0.3/net/rds/ib_recv.c
66868 --- linux-3.0.3/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
66869 +++ linux-3.0.3/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
66870 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
66871 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
66872 int ack_required)
66873 {
66874 - atomic64_set(&ic->i_ack_next, seq);
66875 + atomic64_set_unchecked(&ic->i_ack_next, seq);
66876 if (ack_required) {
66877 smp_mb__before_clear_bit();
66878 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
66879 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
66880 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
66881 smp_mb__after_clear_bit();
66882
66883 - return atomic64_read(&ic->i_ack_next);
66884 + return atomic64_read_unchecked(&ic->i_ack_next);
66885 }
66886 #endif
66887
66888 diff -urNp linux-3.0.3/net/rds/iw_cm.c linux-3.0.3/net/rds/iw_cm.c
66889 --- linux-3.0.3/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
66890 +++ linux-3.0.3/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
66891 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
66892 /* Clear the ACK state */
66893 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
66894 #ifdef KERNEL_HAS_ATOMIC64
66895 - atomic64_set(&ic->i_ack_next, 0);
66896 + atomic64_set_unchecked(&ic->i_ack_next, 0);
66897 #else
66898 ic->i_ack_next = 0;
66899 #endif
66900 diff -urNp linux-3.0.3/net/rds/iw.h linux-3.0.3/net/rds/iw.h
66901 --- linux-3.0.3/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
66902 +++ linux-3.0.3/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
66903 @@ -133,7 +133,7 @@ struct rds_iw_connection {
66904 /* sending acks */
66905 unsigned long i_ack_flags;
66906 #ifdef KERNEL_HAS_ATOMIC64
66907 - atomic64_t i_ack_next; /* next ACK to send */
66908 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
66909 #else
66910 spinlock_t i_ack_lock; /* protect i_ack_next */
66911 u64 i_ack_next; /* next ACK to send */
66912 diff -urNp linux-3.0.3/net/rds/iw_rdma.c linux-3.0.3/net/rds/iw_rdma.c
66913 --- linux-3.0.3/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
66914 +++ linux-3.0.3/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
66915 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
66916 struct rdma_cm_id *pcm_id;
66917 int rc;
66918
66919 + pax_track_stack();
66920 +
66921 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
66922 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
66923
66924 diff -urNp linux-3.0.3/net/rds/iw_recv.c linux-3.0.3/net/rds/iw_recv.c
66925 --- linux-3.0.3/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
66926 +++ linux-3.0.3/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
66927 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
66928 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
66929 int ack_required)
66930 {
66931 - atomic64_set(&ic->i_ack_next, seq);
66932 + atomic64_set_unchecked(&ic->i_ack_next, seq);
66933 if (ack_required) {
66934 smp_mb__before_clear_bit();
66935 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
66936 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
66937 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
66938 smp_mb__after_clear_bit();
66939
66940 - return atomic64_read(&ic->i_ack_next);
66941 + return atomic64_read_unchecked(&ic->i_ack_next);
66942 }
66943 #endif
66944
66945 diff -urNp linux-3.0.3/net/rxrpc/af_rxrpc.c linux-3.0.3/net/rxrpc/af_rxrpc.c
66946 --- linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
66947 +++ linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
66948 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
66949 __be32 rxrpc_epoch;
66950
66951 /* current debugging ID */
66952 -atomic_t rxrpc_debug_id;
66953 +atomic_unchecked_t rxrpc_debug_id;
66954
66955 /* count of skbs currently in use */
66956 atomic_t rxrpc_n_skbs;
66957 diff -urNp linux-3.0.3/net/rxrpc/ar-ack.c linux-3.0.3/net/rxrpc/ar-ack.c
66958 --- linux-3.0.3/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
66959 +++ linux-3.0.3/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
66960 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
66961
66962 _enter("{%d,%d,%d,%d},",
66963 call->acks_hard, call->acks_unacked,
66964 - atomic_read(&call->sequence),
66965 + atomic_read_unchecked(&call->sequence),
66966 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
66967
66968 stop = 0;
66969 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
66970
66971 /* each Tx packet has a new serial number */
66972 sp->hdr.serial =
66973 - htonl(atomic_inc_return(&call->conn->serial));
66974 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
66975
66976 hdr = (struct rxrpc_header *) txb->head;
66977 hdr->serial = sp->hdr.serial;
66978 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
66979 */
66980 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
66981 {
66982 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
66983 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
66984 }
66985
66986 /*
66987 @@ -629,7 +629,7 @@ process_further:
66988
66989 latest = ntohl(sp->hdr.serial);
66990 hard = ntohl(ack.firstPacket);
66991 - tx = atomic_read(&call->sequence);
66992 + tx = atomic_read_unchecked(&call->sequence);
66993
66994 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
66995 latest,
66996 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
66997 u32 abort_code = RX_PROTOCOL_ERROR;
66998 u8 *acks = NULL;
66999
67000 + pax_track_stack();
67001 +
67002 //printk("\n--------------------\n");
67003 _enter("{%d,%s,%lx} [%lu]",
67004 call->debug_id, rxrpc_call_states[call->state], call->events,
67005 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
67006 goto maybe_reschedule;
67007
67008 send_ACK_with_skew:
67009 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67010 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67011 ntohl(ack.serial));
67012 send_ACK:
67013 mtu = call->conn->trans->peer->if_mtu;
67014 @@ -1173,7 +1175,7 @@ send_ACK:
67015 ackinfo.rxMTU = htonl(5692);
67016 ackinfo.jumbo_max = htonl(4);
67017
67018 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67019 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67020 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67021 ntohl(hdr.serial),
67022 ntohs(ack.maxSkew),
67023 @@ -1191,7 +1193,7 @@ send_ACK:
67024 send_message:
67025 _debug("send message");
67026
67027 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67028 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67029 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67030 send_message_2:
67031
67032 diff -urNp linux-3.0.3/net/rxrpc/ar-call.c linux-3.0.3/net/rxrpc/ar-call.c
67033 --- linux-3.0.3/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
67034 +++ linux-3.0.3/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
67035 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67036 spin_lock_init(&call->lock);
67037 rwlock_init(&call->state_lock);
67038 atomic_set(&call->usage, 1);
67039 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67040 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67041 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67042
67043 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67044 diff -urNp linux-3.0.3/net/rxrpc/ar-connection.c linux-3.0.3/net/rxrpc/ar-connection.c
67045 --- linux-3.0.3/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
67046 +++ linux-3.0.3/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
67047 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67048 rwlock_init(&conn->lock);
67049 spin_lock_init(&conn->state_lock);
67050 atomic_set(&conn->usage, 1);
67051 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67052 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67053 conn->avail_calls = RXRPC_MAXCALLS;
67054 conn->size_align = 4;
67055 conn->header_size = sizeof(struct rxrpc_header);
67056 diff -urNp linux-3.0.3/net/rxrpc/ar-connevent.c linux-3.0.3/net/rxrpc/ar-connevent.c
67057 --- linux-3.0.3/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
67058 +++ linux-3.0.3/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
67059 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67060
67061 len = iov[0].iov_len + iov[1].iov_len;
67062
67063 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67064 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67065 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67066
67067 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67068 diff -urNp linux-3.0.3/net/rxrpc/ar-input.c linux-3.0.3/net/rxrpc/ar-input.c
67069 --- linux-3.0.3/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
67070 +++ linux-3.0.3/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
67071 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67072 /* track the latest serial number on this connection for ACK packet
67073 * information */
67074 serial = ntohl(sp->hdr.serial);
67075 - hi_serial = atomic_read(&call->conn->hi_serial);
67076 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67077 while (serial > hi_serial)
67078 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67079 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67080 serial);
67081
67082 /* request ACK generation for any ACK or DATA packet that requests
67083 diff -urNp linux-3.0.3/net/rxrpc/ar-internal.h linux-3.0.3/net/rxrpc/ar-internal.h
67084 --- linux-3.0.3/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
67085 +++ linux-3.0.3/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
67086 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67087 int error; /* error code for local abort */
67088 int debug_id; /* debug ID for printks */
67089 unsigned call_counter; /* call ID counter */
67090 - atomic_t serial; /* packet serial number counter */
67091 - atomic_t hi_serial; /* highest serial number received */
67092 + atomic_unchecked_t serial; /* packet serial number counter */
67093 + atomic_unchecked_t hi_serial; /* highest serial number received */
67094 u8 avail_calls; /* number of calls available */
67095 u8 size_align; /* data size alignment (for security) */
67096 u8 header_size; /* rxrpc + security header size */
67097 @@ -346,7 +346,7 @@ struct rxrpc_call {
67098 spinlock_t lock;
67099 rwlock_t state_lock; /* lock for state transition */
67100 atomic_t usage;
67101 - atomic_t sequence; /* Tx data packet sequence counter */
67102 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67103 u32 abort_code; /* local/remote abort code */
67104 enum { /* current state of call */
67105 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67106 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67107 */
67108 extern atomic_t rxrpc_n_skbs;
67109 extern __be32 rxrpc_epoch;
67110 -extern atomic_t rxrpc_debug_id;
67111 +extern atomic_unchecked_t rxrpc_debug_id;
67112 extern struct workqueue_struct *rxrpc_workqueue;
67113
67114 /*
67115 diff -urNp linux-3.0.3/net/rxrpc/ar-local.c linux-3.0.3/net/rxrpc/ar-local.c
67116 --- linux-3.0.3/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
67117 +++ linux-3.0.3/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
67118 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67119 spin_lock_init(&local->lock);
67120 rwlock_init(&local->services_lock);
67121 atomic_set(&local->usage, 1);
67122 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67123 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67124 memcpy(&local->srx, srx, sizeof(*srx));
67125 }
67126
67127 diff -urNp linux-3.0.3/net/rxrpc/ar-output.c linux-3.0.3/net/rxrpc/ar-output.c
67128 --- linux-3.0.3/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
67129 +++ linux-3.0.3/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
67130 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67131 sp->hdr.cid = call->cid;
67132 sp->hdr.callNumber = call->call_id;
67133 sp->hdr.seq =
67134 - htonl(atomic_inc_return(&call->sequence));
67135 + htonl(atomic_inc_return_unchecked(&call->sequence));
67136 sp->hdr.serial =
67137 - htonl(atomic_inc_return(&conn->serial));
67138 + htonl(atomic_inc_return_unchecked(&conn->serial));
67139 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67140 sp->hdr.userStatus = 0;
67141 sp->hdr.securityIndex = conn->security_ix;
67142 diff -urNp linux-3.0.3/net/rxrpc/ar-peer.c linux-3.0.3/net/rxrpc/ar-peer.c
67143 --- linux-3.0.3/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
67144 +++ linux-3.0.3/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
67145 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67146 INIT_LIST_HEAD(&peer->error_targets);
67147 spin_lock_init(&peer->lock);
67148 atomic_set(&peer->usage, 1);
67149 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67150 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67151 memcpy(&peer->srx, srx, sizeof(*srx));
67152
67153 rxrpc_assess_MTU_size(peer);
67154 diff -urNp linux-3.0.3/net/rxrpc/ar-proc.c linux-3.0.3/net/rxrpc/ar-proc.c
67155 --- linux-3.0.3/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
67156 +++ linux-3.0.3/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
67157 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67158 atomic_read(&conn->usage),
67159 rxrpc_conn_states[conn->state],
67160 key_serial(conn->key),
67161 - atomic_read(&conn->serial),
67162 - atomic_read(&conn->hi_serial));
67163 + atomic_read_unchecked(&conn->serial),
67164 + atomic_read_unchecked(&conn->hi_serial));
67165
67166 return 0;
67167 }
67168 diff -urNp linux-3.0.3/net/rxrpc/ar-transport.c linux-3.0.3/net/rxrpc/ar-transport.c
67169 --- linux-3.0.3/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
67170 +++ linux-3.0.3/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
67171 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67172 spin_lock_init(&trans->client_lock);
67173 rwlock_init(&trans->conn_lock);
67174 atomic_set(&trans->usage, 1);
67175 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67176 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67177
67178 if (peer->srx.transport.family == AF_INET) {
67179 switch (peer->srx.transport_type) {
67180 diff -urNp linux-3.0.3/net/rxrpc/rxkad.c linux-3.0.3/net/rxrpc/rxkad.c
67181 --- linux-3.0.3/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
67182 +++ linux-3.0.3/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
67183 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67184 u16 check;
67185 int nsg;
67186
67187 + pax_track_stack();
67188 +
67189 sp = rxrpc_skb(skb);
67190
67191 _enter("");
67192 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67193 u16 check;
67194 int nsg;
67195
67196 + pax_track_stack();
67197 +
67198 _enter("");
67199
67200 sp = rxrpc_skb(skb);
67201 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67202
67203 len = iov[0].iov_len + iov[1].iov_len;
67204
67205 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67206 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67207 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67208
67209 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67210 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67211
67212 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67213
67214 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67215 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67216 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67217
67218 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67219 diff -urNp linux-3.0.3/net/sctp/proc.c linux-3.0.3/net/sctp/proc.c
67220 --- linux-3.0.3/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
67221 +++ linux-3.0.3/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
67222 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
67223 seq_printf(seq,
67224 "%8pK %8pK %-3d %-3d %-2d %-4d "
67225 "%4d %8d %8d %7d %5lu %-5d %5d ",
67226 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67227 + assoc, sk,
67228 + sctp_sk(sk)->type, sk->sk_state,
67229 assoc->state, hash,
67230 assoc->assoc_id,
67231 assoc->sndbuf_used,
67232 diff -urNp linux-3.0.3/net/sctp/socket.c linux-3.0.3/net/sctp/socket.c
67233 --- linux-3.0.3/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
67234 +++ linux-3.0.3/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
67235 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
67236 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67237 if (space_left < addrlen)
67238 return -ENOMEM;
67239 - if (copy_to_user(to, &temp, addrlen))
67240 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67241 return -EFAULT;
67242 to += addrlen;
67243 cnt++;
67244 diff -urNp linux-3.0.3/net/socket.c linux-3.0.3/net/socket.c
67245 --- linux-3.0.3/net/socket.c 2011-08-23 21:44:40.000000000 -0400
67246 +++ linux-3.0.3/net/socket.c 2011-08-23 21:48:14.000000000 -0400
67247 @@ -88,6 +88,7 @@
67248 #include <linux/nsproxy.h>
67249 #include <linux/magic.h>
67250 #include <linux/slab.h>
67251 +#include <linux/in.h>
67252
67253 #include <asm/uaccess.h>
67254 #include <asm/unistd.h>
67255 @@ -105,6 +106,8 @@
67256 #include <linux/sockios.h>
67257 #include <linux/atalk.h>
67258
67259 +#include <linux/grsock.h>
67260 +
67261 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67262 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67263 unsigned long nr_segs, loff_t pos);
67264 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
67265 &sockfs_dentry_operations, SOCKFS_MAGIC);
67266 }
67267
67268 -static struct vfsmount *sock_mnt __read_mostly;
67269 +struct vfsmount *sock_mnt __read_mostly;
67270
67271 static struct file_system_type sock_fs_type = {
67272 .name = "sockfs",
67273 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
67274 return -EAFNOSUPPORT;
67275 if (type < 0 || type >= SOCK_MAX)
67276 return -EINVAL;
67277 + if (protocol < 0)
67278 + return -EINVAL;
67279
67280 /* Compatibility.
67281
67282 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67283 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67284 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67285
67286 + if(!gr_search_socket(family, type, protocol)) {
67287 + retval = -EACCES;
67288 + goto out;
67289 + }
67290 +
67291 + if (gr_handle_sock_all(family, type, protocol)) {
67292 + retval = -EACCES;
67293 + goto out;
67294 + }
67295 +
67296 retval = sock_create(family, type, protocol, &sock);
67297 if (retval < 0)
67298 goto out;
67299 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67300 if (sock) {
67301 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
67302 if (err >= 0) {
67303 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
67304 + err = -EACCES;
67305 + goto error;
67306 + }
67307 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
67308 + if (err)
67309 + goto error;
67310 +
67311 err = security_socket_bind(sock,
67312 (struct sockaddr *)&address,
67313 addrlen);
67314 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67315 (struct sockaddr *)
67316 &address, addrlen);
67317 }
67318 +error:
67319 fput_light(sock->file, fput_needed);
67320 }
67321 return err;
67322 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
67323 if ((unsigned)backlog > somaxconn)
67324 backlog = somaxconn;
67325
67326 + if (gr_handle_sock_server_other(sock->sk)) {
67327 + err = -EPERM;
67328 + goto error;
67329 + }
67330 +
67331 + err = gr_search_listen(sock);
67332 + if (err)
67333 + goto error;
67334 +
67335 err = security_socket_listen(sock, backlog);
67336 if (!err)
67337 err = sock->ops->listen(sock, backlog);
67338
67339 +error:
67340 fput_light(sock->file, fput_needed);
67341 }
67342 return err;
67343 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67344 newsock->type = sock->type;
67345 newsock->ops = sock->ops;
67346
67347 + if (gr_handle_sock_server_other(sock->sk)) {
67348 + err = -EPERM;
67349 + sock_release(newsock);
67350 + goto out_put;
67351 + }
67352 +
67353 + err = gr_search_accept(sock);
67354 + if (err) {
67355 + sock_release(newsock);
67356 + goto out_put;
67357 + }
67358 +
67359 /*
67360 * We don't need try_module_get here, as the listening socket (sock)
67361 * has the protocol module (sock->ops->owner) held.
67362 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67363 fd_install(newfd, newfile);
67364 err = newfd;
67365
67366 + gr_attach_curr_ip(newsock->sk);
67367 +
67368 out_put:
67369 fput_light(sock->file, fput_needed);
67370 out:
67371 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67372 int, addrlen)
67373 {
67374 struct socket *sock;
67375 + struct sockaddr *sck;
67376 struct sockaddr_storage address;
67377 int err, fput_needed;
67378
67379 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67380 if (err < 0)
67381 goto out_put;
67382
67383 + sck = (struct sockaddr *)&address;
67384 +
67385 + if (gr_handle_sock_client(sck)) {
67386 + err = -EACCES;
67387 + goto out_put;
67388 + }
67389 +
67390 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
67391 + if (err)
67392 + goto out_put;
67393 +
67394 err =
67395 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
67396 if (err)
67397 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
67398 unsigned char *ctl_buf = ctl;
67399 int err, ctl_len, iov_size, total_len;
67400
67401 + pax_track_stack();
67402 +
67403 err = -EFAULT;
67404 if (MSG_CMSG_COMPAT & flags) {
67405 if (get_compat_msghdr(msg_sys, msg_compat))
67406 diff -urNp linux-3.0.3/net/sunrpc/sched.c linux-3.0.3/net/sunrpc/sched.c
67407 --- linux-3.0.3/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
67408 +++ linux-3.0.3/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
67409 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
67410 #ifdef RPC_DEBUG
67411 static void rpc_task_set_debuginfo(struct rpc_task *task)
67412 {
67413 - static atomic_t rpc_pid;
67414 + static atomic_unchecked_t rpc_pid;
67415
67416 - task->tk_pid = atomic_inc_return(&rpc_pid);
67417 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
67418 }
67419 #else
67420 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
67421 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c
67422 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
67423 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
67424 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
67425 static unsigned int min_max_inline = 4096;
67426 static unsigned int max_max_inline = 65536;
67427
67428 -atomic_t rdma_stat_recv;
67429 -atomic_t rdma_stat_read;
67430 -atomic_t rdma_stat_write;
67431 -atomic_t rdma_stat_sq_starve;
67432 -atomic_t rdma_stat_rq_starve;
67433 -atomic_t rdma_stat_rq_poll;
67434 -atomic_t rdma_stat_rq_prod;
67435 -atomic_t rdma_stat_sq_poll;
67436 -atomic_t rdma_stat_sq_prod;
67437 +atomic_unchecked_t rdma_stat_recv;
67438 +atomic_unchecked_t rdma_stat_read;
67439 +atomic_unchecked_t rdma_stat_write;
67440 +atomic_unchecked_t rdma_stat_sq_starve;
67441 +atomic_unchecked_t rdma_stat_rq_starve;
67442 +atomic_unchecked_t rdma_stat_rq_poll;
67443 +atomic_unchecked_t rdma_stat_rq_prod;
67444 +atomic_unchecked_t rdma_stat_sq_poll;
67445 +atomic_unchecked_t rdma_stat_sq_prod;
67446
67447 /* Temporary NFS request map and context caches */
67448 struct kmem_cache *svc_rdma_map_cachep;
67449 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
67450 len -= *ppos;
67451 if (len > *lenp)
67452 len = *lenp;
67453 - if (len && copy_to_user(buffer, str_buf, len))
67454 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
67455 return -EFAULT;
67456 *lenp = len;
67457 *ppos += len;
67458 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
67459 {
67460 .procname = "rdma_stat_read",
67461 .data = &rdma_stat_read,
67462 - .maxlen = sizeof(atomic_t),
67463 + .maxlen = sizeof(atomic_unchecked_t),
67464 .mode = 0644,
67465 .proc_handler = read_reset_stat,
67466 },
67467 {
67468 .procname = "rdma_stat_recv",
67469 .data = &rdma_stat_recv,
67470 - .maxlen = sizeof(atomic_t),
67471 + .maxlen = sizeof(atomic_unchecked_t),
67472 .mode = 0644,
67473 .proc_handler = read_reset_stat,
67474 },
67475 {
67476 .procname = "rdma_stat_write",
67477 .data = &rdma_stat_write,
67478 - .maxlen = sizeof(atomic_t),
67479 + .maxlen = sizeof(atomic_unchecked_t),
67480 .mode = 0644,
67481 .proc_handler = read_reset_stat,
67482 },
67483 {
67484 .procname = "rdma_stat_sq_starve",
67485 .data = &rdma_stat_sq_starve,
67486 - .maxlen = sizeof(atomic_t),
67487 + .maxlen = sizeof(atomic_unchecked_t),
67488 .mode = 0644,
67489 .proc_handler = read_reset_stat,
67490 },
67491 {
67492 .procname = "rdma_stat_rq_starve",
67493 .data = &rdma_stat_rq_starve,
67494 - .maxlen = sizeof(atomic_t),
67495 + .maxlen = sizeof(atomic_unchecked_t),
67496 .mode = 0644,
67497 .proc_handler = read_reset_stat,
67498 },
67499 {
67500 .procname = "rdma_stat_rq_poll",
67501 .data = &rdma_stat_rq_poll,
67502 - .maxlen = sizeof(atomic_t),
67503 + .maxlen = sizeof(atomic_unchecked_t),
67504 .mode = 0644,
67505 .proc_handler = read_reset_stat,
67506 },
67507 {
67508 .procname = "rdma_stat_rq_prod",
67509 .data = &rdma_stat_rq_prod,
67510 - .maxlen = sizeof(atomic_t),
67511 + .maxlen = sizeof(atomic_unchecked_t),
67512 .mode = 0644,
67513 .proc_handler = read_reset_stat,
67514 },
67515 {
67516 .procname = "rdma_stat_sq_poll",
67517 .data = &rdma_stat_sq_poll,
67518 - .maxlen = sizeof(atomic_t),
67519 + .maxlen = sizeof(atomic_unchecked_t),
67520 .mode = 0644,
67521 .proc_handler = read_reset_stat,
67522 },
67523 {
67524 .procname = "rdma_stat_sq_prod",
67525 .data = &rdma_stat_sq_prod,
67526 - .maxlen = sizeof(atomic_t),
67527 + .maxlen = sizeof(atomic_unchecked_t),
67528 .mode = 0644,
67529 .proc_handler = read_reset_stat,
67530 },
67531 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
67532 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
67533 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
67534 @@ -499,7 +499,7 @@ next_sge:
67535 svc_rdma_put_context(ctxt, 0);
67536 goto out;
67537 }
67538 - atomic_inc(&rdma_stat_read);
67539 + atomic_inc_unchecked(&rdma_stat_read);
67540
67541 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
67542 chl_map->ch[ch_no].count -= read_wr.num_sge;
67543 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67544 dto_q);
67545 list_del_init(&ctxt->dto_q);
67546 } else {
67547 - atomic_inc(&rdma_stat_rq_starve);
67548 + atomic_inc_unchecked(&rdma_stat_rq_starve);
67549 clear_bit(XPT_DATA, &xprt->xpt_flags);
67550 ctxt = NULL;
67551 }
67552 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67553 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
67554 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
67555 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
67556 - atomic_inc(&rdma_stat_recv);
67557 + atomic_inc_unchecked(&rdma_stat_recv);
67558
67559 /* Build up the XDR from the receive buffers. */
67560 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
67561 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c
67562 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
67563 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
67564 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
67565 write_wr.wr.rdma.remote_addr = to;
67566
67567 /* Post It */
67568 - atomic_inc(&rdma_stat_write);
67569 + atomic_inc_unchecked(&rdma_stat_write);
67570 if (svc_rdma_send(xprt, &write_wr))
67571 goto err;
67572 return 0;
67573 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c
67574 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
67575 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
67576 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
67577 return;
67578
67579 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
67580 - atomic_inc(&rdma_stat_rq_poll);
67581 + atomic_inc_unchecked(&rdma_stat_rq_poll);
67582
67583 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
67584 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
67585 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
67586 }
67587
67588 if (ctxt)
67589 - atomic_inc(&rdma_stat_rq_prod);
67590 + atomic_inc_unchecked(&rdma_stat_rq_prod);
67591
67592 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
67593 /*
67594 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
67595 return;
67596
67597 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
67598 - atomic_inc(&rdma_stat_sq_poll);
67599 + atomic_inc_unchecked(&rdma_stat_sq_poll);
67600 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
67601 if (wc.status != IB_WC_SUCCESS)
67602 /* Close the transport */
67603 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
67604 }
67605
67606 if (ctxt)
67607 - atomic_inc(&rdma_stat_sq_prod);
67608 + atomic_inc_unchecked(&rdma_stat_sq_prod);
67609 }
67610
67611 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
67612 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
67613 spin_lock_bh(&xprt->sc_lock);
67614 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
67615 spin_unlock_bh(&xprt->sc_lock);
67616 - atomic_inc(&rdma_stat_sq_starve);
67617 + atomic_inc_unchecked(&rdma_stat_sq_starve);
67618
67619 /* See if we can opportunistically reap SQ WR to make room */
67620 sq_cq_reap(xprt);
67621 diff -urNp linux-3.0.3/net/sysctl_net.c linux-3.0.3/net/sysctl_net.c
67622 --- linux-3.0.3/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
67623 +++ linux-3.0.3/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
67624 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
67625 struct ctl_table *table)
67626 {
67627 /* Allow network administrator to have same access as root. */
67628 - if (capable(CAP_NET_ADMIN)) {
67629 + if (capable_nolog(CAP_NET_ADMIN)) {
67630 int mode = (table->mode >> 6) & 7;
67631 return (mode << 6) | (mode << 3) | mode;
67632 }
67633 diff -urNp linux-3.0.3/net/unix/af_unix.c linux-3.0.3/net/unix/af_unix.c
67634 --- linux-3.0.3/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
67635 +++ linux-3.0.3/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
67636 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
67637 err = -ECONNREFUSED;
67638 if (!S_ISSOCK(inode->i_mode))
67639 goto put_fail;
67640 +
67641 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
67642 + err = -EACCES;
67643 + goto put_fail;
67644 + }
67645 +
67646 u = unix_find_socket_byinode(inode);
67647 if (!u)
67648 goto put_fail;
67649 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
67650 if (u) {
67651 struct dentry *dentry;
67652 dentry = unix_sk(u)->dentry;
67653 +
67654 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
67655 + err = -EPERM;
67656 + sock_put(u);
67657 + goto fail;
67658 + }
67659 +
67660 if (dentry)
67661 touch_atime(unix_sk(u)->mnt, dentry);
67662 } else
67663 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
67664 err = security_path_mknod(&nd.path, dentry, mode, 0);
67665 if (err)
67666 goto out_mknod_drop_write;
67667 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
67668 + err = -EACCES;
67669 + goto out_mknod_drop_write;
67670 + }
67671 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
67672 out_mknod_drop_write:
67673 mnt_drop_write(nd.path.mnt);
67674 if (err)
67675 goto out_mknod_dput;
67676 +
67677 + gr_handle_create(dentry, nd.path.mnt);
67678 +
67679 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
67680 dput(nd.path.dentry);
67681 nd.path.dentry = dentry;
67682 diff -urNp linux-3.0.3/net/wireless/core.h linux-3.0.3/net/wireless/core.h
67683 --- linux-3.0.3/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
67684 +++ linux-3.0.3/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
67685 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
67686 struct mutex mtx;
67687
67688 /* rfkill support */
67689 - struct rfkill_ops rfkill_ops;
67690 + rfkill_ops_no_const rfkill_ops;
67691 struct rfkill *rfkill;
67692 struct work_struct rfkill_sync;
67693
67694 diff -urNp linux-3.0.3/net/wireless/wext-core.c linux-3.0.3/net/wireless/wext-core.c
67695 --- linux-3.0.3/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
67696 +++ linux-3.0.3/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
67697 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
67698 */
67699
67700 /* Support for very large requests */
67701 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
67702 - (user_length > descr->max_tokens)) {
67703 + if (user_length > descr->max_tokens) {
67704 /* Allow userspace to GET more than max so
67705 * we can support any size GET requests.
67706 * There is still a limit : -ENOMEM.
67707 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
67708 }
67709 }
67710
67711 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
67712 - /*
67713 - * If this is a GET, but not NOMAX, it means that the extra
67714 - * data is not bounded by userspace, but by max_tokens. Thus
67715 - * set the length to max_tokens. This matches the extra data
67716 - * allocation.
67717 - * The driver should fill it with the number of tokens it
67718 - * provided, and it may check iwp->length rather than having
67719 - * knowledge of max_tokens. If the driver doesn't change the
67720 - * iwp->length, this ioctl just copies back max_token tokens
67721 - * filled with zeroes. Hopefully the driver isn't claiming
67722 - * them to be valid data.
67723 - */
67724 - iwp->length = descr->max_tokens;
67725 - }
67726 -
67727 err = handler(dev, info, (union iwreq_data *) iwp, extra);
67728
67729 iwp->length += essid_compat;
67730 diff -urNp linux-3.0.3/net/xfrm/xfrm_policy.c linux-3.0.3/net/xfrm/xfrm_policy.c
67731 --- linux-3.0.3/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
67732 +++ linux-3.0.3/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
67733 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
67734 {
67735 policy->walk.dead = 1;
67736
67737 - atomic_inc(&policy->genid);
67738 + atomic_inc_unchecked(&policy->genid);
67739
67740 if (del_timer(&policy->timer))
67741 xfrm_pol_put(policy);
67742 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
67743 hlist_add_head(&policy->bydst, chain);
67744 xfrm_pol_hold(policy);
67745 net->xfrm.policy_count[dir]++;
67746 - atomic_inc(&flow_cache_genid);
67747 + atomic_inc_unchecked(&flow_cache_genid);
67748 if (delpol)
67749 __xfrm_policy_unlink(delpol, dir);
67750 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
67751 @@ -1528,7 +1528,7 @@ free_dst:
67752 goto out;
67753 }
67754
67755 -static int inline
67756 +static inline int
67757 xfrm_dst_alloc_copy(void **target, const void *src, int size)
67758 {
67759 if (!*target) {
67760 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
67761 return 0;
67762 }
67763
67764 -static int inline
67765 +static inline int
67766 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
67767 {
67768 #ifdef CONFIG_XFRM_SUB_POLICY
67769 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
67770 #endif
67771 }
67772
67773 -static int inline
67774 +static inline int
67775 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
67776 {
67777 #ifdef CONFIG_XFRM_SUB_POLICY
67778 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
67779
67780 xdst->num_pols = num_pols;
67781 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
67782 - xdst->policy_genid = atomic_read(&pols[0]->genid);
67783 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
67784
67785 return xdst;
67786 }
67787 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
67788 if (xdst->xfrm_genid != dst->xfrm->genid)
67789 return 0;
67790 if (xdst->num_pols > 0 &&
67791 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
67792 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
67793 return 0;
67794
67795 mtu = dst_mtu(dst->child);
67796 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
67797 sizeof(pol->xfrm_vec[i].saddr));
67798 pol->xfrm_vec[i].encap_family = mp->new_family;
67799 /* flush bundles */
67800 - atomic_inc(&pol->genid);
67801 + atomic_inc_unchecked(&pol->genid);
67802 }
67803 }
67804
67805 diff -urNp linux-3.0.3/net/xfrm/xfrm_user.c linux-3.0.3/net/xfrm/xfrm_user.c
67806 --- linux-3.0.3/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
67807 +++ linux-3.0.3/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
67808 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
67809 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
67810 int i;
67811
67812 + pax_track_stack();
67813 +
67814 if (xp->xfrm_nr == 0)
67815 return 0;
67816
67817 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
67818 int err;
67819 int n = 0;
67820
67821 + pax_track_stack();
67822 +
67823 if (attrs[XFRMA_MIGRATE] == NULL)
67824 return -EINVAL;
67825
67826 diff -urNp linux-3.0.3/scripts/basic/fixdep.c linux-3.0.3/scripts/basic/fixdep.c
67827 --- linux-3.0.3/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
67828 +++ linux-3.0.3/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
67829 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
67830
67831 static void parse_config_file(const char *map, size_t len)
67832 {
67833 - const int *end = (const int *) (map + len);
67834 + const unsigned int *end = (const unsigned int *) (map + len);
67835 /* start at +1, so that p can never be < map */
67836 - const int *m = (const int *) map + 1;
67837 + const unsigned int *m = (const unsigned int *) map + 1;
67838 const char *p, *q;
67839
67840 for (; m < end; m++) {
67841 @@ -405,7 +405,7 @@ static void print_deps(void)
67842 static void traps(void)
67843 {
67844 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
67845 - int *p = (int *)test;
67846 + unsigned int *p = (unsigned int *)test;
67847
67848 if (*p != INT_CONF) {
67849 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
67850 diff -urNp linux-3.0.3/scripts/gcc-plugin.sh linux-3.0.3/scripts/gcc-plugin.sh
67851 --- linux-3.0.3/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
67852 +++ linux-3.0.3/scripts/gcc-plugin.sh 2011-08-23 21:47:56.000000000 -0400
67853 @@ -0,0 +1,2 @@
67854 +#!/bin/sh
67855 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
67856 diff -urNp linux-3.0.3/scripts/Makefile.build linux-3.0.3/scripts/Makefile.build
67857 --- linux-3.0.3/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
67858 +++ linux-3.0.3/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
67859 @@ -109,7 +109,7 @@ endif
67860 endif
67861
67862 # Do not include host rules unless needed
67863 -ifneq ($(hostprogs-y)$(hostprogs-m),)
67864 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
67865 include scripts/Makefile.host
67866 endif
67867
67868 diff -urNp linux-3.0.3/scripts/Makefile.clean linux-3.0.3/scripts/Makefile.clean
67869 --- linux-3.0.3/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
67870 +++ linux-3.0.3/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
67871 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
67872 __clean-files := $(extra-y) $(always) \
67873 $(targets) $(clean-files) \
67874 $(host-progs) \
67875 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
67876 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
67877 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
67878
67879 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
67880
67881 diff -urNp linux-3.0.3/scripts/Makefile.host linux-3.0.3/scripts/Makefile.host
67882 --- linux-3.0.3/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
67883 +++ linux-3.0.3/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
67884 @@ -31,6 +31,7 @@
67885 # Note: Shared libraries consisting of C++ files are not supported
67886
67887 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
67888 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
67889
67890 # C code
67891 # Executables compiled from a single .c file
67892 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
67893 # Shared libaries (only .c supported)
67894 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
67895 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
67896 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
67897 # Remove .so files from "xxx-objs"
67898 host-cobjs := $(filter-out %.so,$(host-cobjs))
67899
67900 diff -urNp linux-3.0.3/scripts/mod/file2alias.c linux-3.0.3/scripts/mod/file2alias.c
67901 --- linux-3.0.3/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
67902 +++ linux-3.0.3/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
67903 @@ -72,7 +72,7 @@ static void device_id_check(const char *
67904 unsigned long size, unsigned long id_size,
67905 void *symval)
67906 {
67907 - int i;
67908 + unsigned int i;
67909
67910 if (size % id_size || size < id_size) {
67911 if (cross_build != 0)
67912 @@ -102,7 +102,7 @@ static void device_id_check(const char *
67913 /* USB is special because the bcdDevice can be matched against a numeric range */
67914 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
67915 static void do_usb_entry(struct usb_device_id *id,
67916 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
67917 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
67918 unsigned char range_lo, unsigned char range_hi,
67919 unsigned char max, struct module *mod)
67920 {
67921 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
67922 for (i = 0; i < count; i++) {
67923 const char *id = (char *)devs[i].id;
67924 char acpi_id[sizeof(devs[0].id)];
67925 - int j;
67926 + unsigned int j;
67927
67928 buf_printf(&mod->dev_table_buf,
67929 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
67930 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
67931
67932 for (j = 0; j < PNP_MAX_DEVICES; j++) {
67933 const char *id = (char *)card->devs[j].id;
67934 - int i2, j2;
67935 + unsigned int i2, j2;
67936 int dup = 0;
67937
67938 if (!id[0])
67939 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
67940 /* add an individual alias for every device entry */
67941 if (!dup) {
67942 char acpi_id[sizeof(card->devs[0].id)];
67943 - int k;
67944 + unsigned int k;
67945
67946 buf_printf(&mod->dev_table_buf,
67947 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
67948 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
67949 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
67950 char *alias)
67951 {
67952 - int i, j;
67953 + unsigned int i, j;
67954
67955 sprintf(alias, "dmi*");
67956
67957 diff -urNp linux-3.0.3/scripts/mod/modpost.c linux-3.0.3/scripts/mod/modpost.c
67958 --- linux-3.0.3/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
67959 +++ linux-3.0.3/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
67960 @@ -892,6 +892,7 @@ enum mismatch {
67961 ANY_INIT_TO_ANY_EXIT,
67962 ANY_EXIT_TO_ANY_INIT,
67963 EXPORT_TO_INIT_EXIT,
67964 + DATA_TO_TEXT
67965 };
67966
67967 struct sectioncheck {
67968 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
67969 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
67970 .mismatch = EXPORT_TO_INIT_EXIT,
67971 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
67972 +},
67973 +/* Do not reference code from writable data */
67974 +{
67975 + .fromsec = { DATA_SECTIONS, NULL },
67976 + .tosec = { TEXT_SECTIONS, NULL },
67977 + .mismatch = DATA_TO_TEXT
67978 }
67979 };
67980
67981 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
67982 continue;
67983 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
67984 continue;
67985 - if (sym->st_value == addr)
67986 - return sym;
67987 /* Find a symbol nearby - addr are maybe negative */
67988 d = sym->st_value - addr;
67989 + if (d == 0)
67990 + return sym;
67991 if (d < 0)
67992 d = addr - sym->st_value;
67993 if (d < distance) {
67994 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
67995 tosym, prl_to, prl_to, tosym);
67996 free(prl_to);
67997 break;
67998 + case DATA_TO_TEXT:
67999 +/*
68000 + fprintf(stderr,
68001 + "The variable %s references\n"
68002 + "the %s %s%s%s\n",
68003 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68004 +*/
68005 + break;
68006 }
68007 fprintf(stderr, "\n");
68008 }
68009 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
68010 static void check_sec_ref(struct module *mod, const char *modname,
68011 struct elf_info *elf)
68012 {
68013 - int i;
68014 + unsigned int i;
68015 Elf_Shdr *sechdrs = elf->sechdrs;
68016
68017 /* Walk through all sections */
68018 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
68019 va_end(ap);
68020 }
68021
68022 -void buf_write(struct buffer *buf, const char *s, int len)
68023 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68024 {
68025 if (buf->size - buf->pos < len) {
68026 buf->size += len + SZ;
68027 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
68028 if (fstat(fileno(file), &st) < 0)
68029 goto close_write;
68030
68031 - if (st.st_size != b->pos)
68032 + if (st.st_size != (off_t)b->pos)
68033 goto close_write;
68034
68035 tmp = NOFAIL(malloc(b->pos));
68036 diff -urNp linux-3.0.3/scripts/mod/modpost.h linux-3.0.3/scripts/mod/modpost.h
68037 --- linux-3.0.3/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
68038 +++ linux-3.0.3/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
68039 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68040
68041 struct buffer {
68042 char *p;
68043 - int pos;
68044 - int size;
68045 + unsigned int pos;
68046 + unsigned int size;
68047 };
68048
68049 void __attribute__((format(printf, 2, 3)))
68050 buf_printf(struct buffer *buf, const char *fmt, ...);
68051
68052 void
68053 -buf_write(struct buffer *buf, const char *s, int len);
68054 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68055
68056 struct module {
68057 struct module *next;
68058 diff -urNp linux-3.0.3/scripts/mod/sumversion.c linux-3.0.3/scripts/mod/sumversion.c
68059 --- linux-3.0.3/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
68060 +++ linux-3.0.3/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
68061 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68062 goto out;
68063 }
68064
68065 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68066 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68067 warn("writing sum in %s failed: %s\n",
68068 filename, strerror(errno));
68069 goto out;
68070 diff -urNp linux-3.0.3/scripts/pnmtologo.c linux-3.0.3/scripts/pnmtologo.c
68071 --- linux-3.0.3/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
68072 +++ linux-3.0.3/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
68073 @@ -237,14 +237,14 @@ static void write_header(void)
68074 fprintf(out, " * Linux logo %s\n", logoname);
68075 fputs(" */\n\n", out);
68076 fputs("#include <linux/linux_logo.h>\n\n", out);
68077 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68078 + fprintf(out, "static unsigned char %s_data[] = {\n",
68079 logoname);
68080 }
68081
68082 static void write_footer(void)
68083 {
68084 fputs("\n};\n\n", out);
68085 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68086 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68087 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68088 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68089 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68090 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68091 fputs("\n};\n\n", out);
68092
68093 /* write logo clut */
68094 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68095 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68096 logoname);
68097 write_hex_cnt = 0;
68098 for (i = 0; i < logo_clutsize; i++) {
68099 diff -urNp linux-3.0.3/security/apparmor/lsm.c linux-3.0.3/security/apparmor/lsm.c
68100 --- linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
68101 +++ linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
68102 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68103 return error;
68104 }
68105
68106 -static struct security_operations apparmor_ops = {
68107 +static struct security_operations apparmor_ops __read_only = {
68108 .name = "apparmor",
68109
68110 .ptrace_access_check = apparmor_ptrace_access_check,
68111 diff -urNp linux-3.0.3/security/commoncap.c linux-3.0.3/security/commoncap.c
68112 --- linux-3.0.3/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
68113 +++ linux-3.0.3/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
68114 @@ -28,6 +28,7 @@
68115 #include <linux/prctl.h>
68116 #include <linux/securebits.h>
68117 #include <linux/user_namespace.h>
68118 +#include <net/sock.h>
68119
68120 /*
68121 * If a non-root user executes a setuid-root binary in
68122 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68123
68124 int cap_netlink_recv(struct sk_buff *skb, int cap)
68125 {
68126 - if (!cap_raised(current_cap(), cap))
68127 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68128 return -EPERM;
68129 return 0;
68130 }
68131 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
68132 {
68133 const struct cred *cred = current_cred();
68134
68135 + if (gr_acl_enable_at_secure())
68136 + return 1;
68137 +
68138 if (cred->uid != 0) {
68139 if (bprm->cap_effective)
68140 return 1;
68141 diff -urNp linux-3.0.3/security/integrity/ima/ima_api.c linux-3.0.3/security/integrity/ima/ima_api.c
68142 --- linux-3.0.3/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
68143 +++ linux-3.0.3/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
68144 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68145 int result;
68146
68147 /* can overflow, only indicator */
68148 - atomic_long_inc(&ima_htable.violations);
68149 + atomic_long_inc_unchecked(&ima_htable.violations);
68150
68151 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68152 if (!entry) {
68153 diff -urNp linux-3.0.3/security/integrity/ima/ima_fs.c linux-3.0.3/security/integrity/ima/ima_fs.c
68154 --- linux-3.0.3/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
68155 +++ linux-3.0.3/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
68156 @@ -28,12 +28,12 @@
68157 static int valid_policy = 1;
68158 #define TMPBUFLEN 12
68159 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68160 - loff_t *ppos, atomic_long_t *val)
68161 + loff_t *ppos, atomic_long_unchecked_t *val)
68162 {
68163 char tmpbuf[TMPBUFLEN];
68164 ssize_t len;
68165
68166 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68167 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68168 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68169 }
68170
68171 diff -urNp linux-3.0.3/security/integrity/ima/ima.h linux-3.0.3/security/integrity/ima/ima.h
68172 --- linux-3.0.3/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
68173 +++ linux-3.0.3/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
68174 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68175 extern spinlock_t ima_queue_lock;
68176
68177 struct ima_h_table {
68178 - atomic_long_t len; /* number of stored measurements in the list */
68179 - atomic_long_t violations;
68180 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68181 + atomic_long_unchecked_t violations;
68182 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68183 };
68184 extern struct ima_h_table ima_htable;
68185 diff -urNp linux-3.0.3/security/integrity/ima/ima_queue.c linux-3.0.3/security/integrity/ima/ima_queue.c
68186 --- linux-3.0.3/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
68187 +++ linux-3.0.3/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
68188 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68189 INIT_LIST_HEAD(&qe->later);
68190 list_add_tail_rcu(&qe->later, &ima_measurements);
68191
68192 - atomic_long_inc(&ima_htable.len);
68193 + atomic_long_inc_unchecked(&ima_htable.len);
68194 key = ima_hash_key(entry->digest);
68195 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68196 return 0;
68197 diff -urNp linux-3.0.3/security/Kconfig linux-3.0.3/security/Kconfig
68198 --- linux-3.0.3/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
68199 +++ linux-3.0.3/security/Kconfig 2011-08-23 21:48:14.000000000 -0400
68200 @@ -4,6 +4,554 @@
68201
68202 menu "Security options"
68203
68204 +source grsecurity/Kconfig
68205 +
68206 +menu "PaX"
68207 +
68208 + config ARCH_TRACK_EXEC_LIMIT
68209 + bool
68210 +
68211 + config PAX_PER_CPU_PGD
68212 + bool
68213 +
68214 + config TASK_SIZE_MAX_SHIFT
68215 + int
68216 + depends on X86_64
68217 + default 47 if !PAX_PER_CPU_PGD
68218 + default 42 if PAX_PER_CPU_PGD
68219 +
68220 + config PAX_ENABLE_PAE
68221 + bool
68222 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68223 +
68224 +config PAX
68225 + bool "Enable various PaX features"
68226 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68227 + help
68228 + This allows you to enable various PaX features. PaX adds
68229 + intrusion prevention mechanisms to the kernel that reduce
68230 + the risks posed by exploitable memory corruption bugs.
68231 +
68232 +menu "PaX Control"
68233 + depends on PAX
68234 +
68235 +config PAX_SOFTMODE
68236 + bool 'Support soft mode'
68237 + select PAX_PT_PAX_FLAGS
68238 + help
68239 + Enabling this option will allow you to run PaX in soft mode, that
68240 + is, PaX features will not be enforced by default, only on executables
68241 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68242 + is the only way to mark executables for soft mode use.
68243 +
68244 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68245 + line option on boot. Furthermore you can control various PaX features
68246 + at runtime via the entries in /proc/sys/kernel/pax.
68247 +
68248 +config PAX_EI_PAX
68249 + bool 'Use legacy ELF header marking'
68250 + help
68251 + Enabling this option will allow you to control PaX features on
68252 + a per executable basis via the 'chpax' utility available at
68253 + http://pax.grsecurity.net/. The control flags will be read from
68254 + an otherwise reserved part of the ELF header. This marking has
68255 + numerous drawbacks (no support for soft-mode, toolchain does not
68256 + know about the non-standard use of the ELF header) therefore it
68257 + has been deprecated in favour of PT_PAX_FLAGS support.
68258 +
68259 + Note that if you enable PT_PAX_FLAGS marking support as well,
68260 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68261 +
68262 +config PAX_PT_PAX_FLAGS
68263 + bool 'Use ELF program header marking'
68264 + help
68265 + Enabling this option will allow you to control PaX features on
68266 + a per executable basis via the 'paxctl' utility available at
68267 + http://pax.grsecurity.net/. The control flags will be read from
68268 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68269 + has the benefits of supporting both soft mode and being fully
68270 + integrated into the toolchain (the binutils patch is available
68271 + from http://pax.grsecurity.net).
68272 +
68273 + If your toolchain does not support PT_PAX_FLAGS markings,
68274 + you can create one in most cases with 'paxctl -C'.
68275 +
68276 + Note that if you enable the legacy EI_PAX marking support as well,
68277 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68278 +
68279 +choice
68280 + prompt 'MAC system integration'
68281 + default PAX_HAVE_ACL_FLAGS
68282 + help
68283 + Mandatory Access Control systems have the option of controlling
68284 + PaX flags on a per executable basis, choose the method supported
68285 + by your particular system.
68286 +
68287 + - "none": if your MAC system does not interact with PaX,
68288 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
68289 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
68290 +
68291 + NOTE: this option is for developers/integrators only.
68292 +
68293 + config PAX_NO_ACL_FLAGS
68294 + bool 'none'
68295 +
68296 + config PAX_HAVE_ACL_FLAGS
68297 + bool 'direct'
68298 +
68299 + config PAX_HOOK_ACL_FLAGS
68300 + bool 'hook'
68301 +endchoice
68302 +
68303 +endmenu
68304 +
68305 +menu "Non-executable pages"
68306 + depends on PAX
68307 +
68308 +config PAX_NOEXEC
68309 + bool "Enforce non-executable pages"
68310 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
68311 + help
68312 + By design some architectures do not allow for protecting memory
68313 + pages against execution or even if they do, Linux does not make
68314 + use of this feature. In practice this means that if a page is
68315 + readable (such as the stack or heap) it is also executable.
68316 +
68317 + There is a well known exploit technique that makes use of this
68318 + fact and a common programming mistake where an attacker can
68319 + introduce code of his choice somewhere in the attacked program's
68320 + memory (typically the stack or the heap) and then execute it.
68321 +
68322 + If the attacked program was running with different (typically
68323 + higher) privileges than that of the attacker, then he can elevate
68324 + his own privilege level (e.g. get a root shell, write to files for
68325 + which he does not have write access to, etc).
68326 +
68327 + Enabling this option will let you choose from various features
68328 + that prevent the injection and execution of 'foreign' code in
68329 + a program.
68330 +
68331 + This will also break programs that rely on the old behaviour and
68332 + expect that dynamically allocated memory via the malloc() family
68333 + of functions is executable (which it is not). Notable examples
68334 + are the XFree86 4.x server, the java runtime and wine.
68335 +
68336 +config PAX_PAGEEXEC
68337 + bool "Paging based non-executable pages"
68338 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
68339 + select S390_SWITCH_AMODE if S390
68340 + select S390_EXEC_PROTECT if S390
68341 + select ARCH_TRACK_EXEC_LIMIT if X86_32
68342 + help
68343 + This implementation is based on the paging feature of the CPU.
68344 + On i386 without hardware non-executable bit support there is a
68345 + variable but usually low performance impact, however on Intel's
68346 + P4 core based CPUs it is very high so you should not enable this
68347 + for kernels meant to be used on such CPUs.
68348 +
68349 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
68350 + with hardware non-executable bit support there is no performance
68351 + impact, on ppc the impact is negligible.
68352 +
68353 + Note that several architectures require various emulations due to
68354 + badly designed userland ABIs, this will cause a performance impact
68355 + but will disappear as soon as userland is fixed. For example, ppc
68356 + userland MUST have been built with secure-plt by a recent toolchain.
68357 +
68358 +config PAX_SEGMEXEC
68359 + bool "Segmentation based non-executable pages"
68360 + depends on PAX_NOEXEC && X86_32
68361 + help
68362 + This implementation is based on the segmentation feature of the
68363 + CPU and has a very small performance impact, however applications
68364 + will be limited to a 1.5 GB address space instead of the normal
68365 + 3 GB.
68366 +
68367 +config PAX_EMUTRAMP
68368 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
68369 + default y if PARISC
68370 + help
68371 + There are some programs and libraries that for one reason or
68372 + another attempt to execute special small code snippets from
68373 + non-executable memory pages. Most notable examples are the
68374 + signal handler return code generated by the kernel itself and
68375 + the GCC trampolines.
68376 +
68377 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
68378 + such programs will no longer work under your kernel.
68379 +
68380 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
68381 + utilities to enable trampoline emulation for the affected programs
68382 + yet still have the protection provided by the non-executable pages.
68383 +
68384 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
68385 + your system will not even boot.
68386 +
68387 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
68388 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
68389 + for the affected files.
68390 +
68391 + NOTE: enabling this feature *may* open up a loophole in the
68392 + protection provided by non-executable pages that an attacker
68393 + could abuse. Therefore the best solution is to not have any
68394 + files on your system that would require this option. This can
68395 + be achieved by not using libc5 (which relies on the kernel
68396 + signal handler return code) and not using or rewriting programs
68397 + that make use of the nested function implementation of GCC.
68398 + Skilled users can just fix GCC itself so that it implements
68399 + nested function calls in a way that does not interfere with PaX.
68400 +
68401 +config PAX_EMUSIGRT
68402 + bool "Automatically emulate sigreturn trampolines"
68403 + depends on PAX_EMUTRAMP && PARISC
68404 + default y
68405 + help
68406 + Enabling this option will have the kernel automatically detect
68407 + and emulate signal return trampolines executing on the stack
68408 + that would otherwise lead to task termination.
68409 +
68410 + This solution is intended as a temporary one for users with
68411 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
68412 + Modula-3 runtime, etc) or executables linked to such, basically
68413 + everything that does not specify its own SA_RESTORER function in
68414 + normal executable memory like glibc 2.1+ does.
68415 +
68416 + On parisc you MUST enable this option, otherwise your system will
68417 + not even boot.
68418 +
68419 + NOTE: this feature cannot be disabled on a per executable basis
68420 + and since it *does* open up a loophole in the protection provided
68421 + by non-executable pages, the best solution is to not have any
68422 + files on your system that would require this option.
68423 +
68424 +config PAX_MPROTECT
68425 + bool "Restrict mprotect()"
68426 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
68427 + help
68428 + Enabling this option will prevent programs from
68429 + - changing the executable status of memory pages that were
68430 + not originally created as executable,
68431 + - making read-only executable pages writable again,
68432 + - creating executable pages from anonymous memory,
68433 + - making read-only-after-relocations (RELRO) data pages writable again.
68434 +
68435 + You should say Y here to complete the protection provided by
68436 + the enforcement of non-executable pages.
68437 +
68438 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68439 + this feature on a per file basis.
68440 +
68441 +config PAX_MPROTECT_COMPAT
68442 + bool "Use legacy/compat protection demoting (read help)"
68443 + depends on PAX_MPROTECT
68444 + default n
68445 + help
68446 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
68447 + by sending the proper error code to the application. For some broken
68448 + userland, this can cause problems with Python or other applications. The
68449 + current implementation however allows for applications like clamav to
68450 + detect if JIT compilation/execution is allowed and to fall back gracefully
68451 + to an interpreter-based mode if it does not. While we encourage everyone
68452 + to use the current implementation as-is and push upstream to fix broken
68453 + userland (note that the RWX logging option can assist with this), in some
68454 + environments this may not be possible. Having to disable MPROTECT
68455 + completely on certain binaries reduces the security benefit of PaX,
68456 + so this option is provided for those environments to revert to the old
68457 + behavior.
68458 +
68459 +config PAX_ELFRELOCS
68460 + bool "Allow ELF text relocations (read help)"
68461 + depends on PAX_MPROTECT
68462 + default n
68463 + help
68464 + Non-executable pages and mprotect() restrictions are effective
68465 + in preventing the introduction of new executable code into an
68466 + attacked task's address space. There remain only two venues
68467 + for this kind of attack: if the attacker can execute already
68468 + existing code in the attacked task then he can either have it
68469 + create and mmap() a file containing his code or have it mmap()
68470 + an already existing ELF library that does not have position
68471 + independent code in it and use mprotect() on it to make it
68472 + writable and copy his code there. While protecting against
68473 + the former approach is beyond PaX, the latter can be prevented
68474 + by having only PIC ELF libraries on one's system (which do not
68475 + need to relocate their code). If you are sure this is your case,
68476 + as is the case with all modern Linux distributions, then leave
68477 + this option disabled. You should say 'n' here.
68478 +
68479 +config PAX_ETEXECRELOCS
68480 + bool "Allow ELF ET_EXEC text relocations"
68481 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
68482 + select PAX_ELFRELOCS
68483 + default y
68484 + help
68485 + On some architectures there are incorrectly created applications
68486 + that require text relocations and would not work without enabling
68487 + this option. If you are an alpha, ia64 or parisc user, you should
68488 + enable this option and disable it once you have made sure that
68489 + none of your applications need it.
68490 +
68491 +config PAX_EMUPLT
68492 + bool "Automatically emulate ELF PLT"
68493 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
68494 + default y
68495 + help
68496 + Enabling this option will have the kernel automatically detect
68497 + and emulate the Procedure Linkage Table entries in ELF files.
68498 + On some architectures such entries are in writable memory, and
68499 + become non-executable leading to task termination. Therefore
68500 + it is mandatory that you enable this option on alpha, parisc,
68501 + sparc and sparc64, otherwise your system would not even boot.
68502 +
68503 + NOTE: this feature *does* open up a loophole in the protection
68504 + provided by the non-executable pages, therefore the proper
68505 + solution is to modify the toolchain to produce a PLT that does
68506 + not need to be writable.
68507 +
68508 +config PAX_DLRESOLVE
68509 + bool 'Emulate old glibc resolver stub'
68510 + depends on PAX_EMUPLT && SPARC
68511 + default n
68512 + help
68513 + This option is needed if userland has an old glibc (before 2.4)
68514 + that puts a 'save' instruction into the runtime generated resolver
68515 + stub that needs special emulation.
68516 +
68517 +config PAX_KERNEXEC
68518 + bool "Enforce non-executable kernel pages"
68519 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
68520 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
68521 + help
68522 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
68523 + that is, enabling this option will make it harder to inject
68524 + and execute 'foreign' code in kernel memory itself.
68525 +
68526 + Note that on x86_64 kernels there is a known regression when
68527 + this feature and KVM/VMX are both enabled in the host kernel.
68528 +
68529 +config PAX_KERNEXEC_MODULE_TEXT
68530 + int "Minimum amount of memory reserved for module code"
68531 + default "4"
68532 + depends on PAX_KERNEXEC && X86_32 && MODULES
68533 + help
68534 + Due to implementation details the kernel must reserve a fixed
68535 + amount of memory for module code at compile time that cannot be
68536 + changed at runtime. Here you can specify the minimum amount
68537 + in MB that will be reserved. Due to the same implementation
68538 + details this size will always be rounded up to the next 2/4 MB
68539 + boundary (depends on PAE) so the actually available memory for
68540 + module code will usually be more than this minimum.
68541 +
68542 + The default 4 MB should be enough for most users but if you have
68543 + an excessive number of modules (e.g., most distribution configs
68544 + compile many drivers as modules) or use huge modules such as
68545 + nvidia's kernel driver, you will need to adjust this amount.
68546 + A good rule of thumb is to look at your currently loaded kernel
68547 + modules and add up their sizes.
68548 +
68549 +endmenu
68550 +
68551 +menu "Address Space Layout Randomization"
68552 + depends on PAX
68553 +
68554 +config PAX_ASLR
68555 + bool "Address Space Layout Randomization"
68556 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
68557 + help
68558 + Many if not most exploit techniques rely on the knowledge of
68559 + certain addresses in the attacked program. The following options
68560 + will allow the kernel to apply a certain amount of randomization
68561 + to specific parts of the program thereby forcing an attacker to
68562 + guess them in most cases. Any failed guess will most likely crash
68563 + the attacked program which allows the kernel to detect such attempts
68564 + and react on them. PaX itself provides no reaction mechanisms,
68565 + instead it is strongly encouraged that you make use of Nergal's
68566 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
68567 + (http://www.grsecurity.net/) built-in crash detection features or
68568 + develop one yourself.
68569 +
68570 + By saying Y here you can choose to randomize the following areas:
68571 + - top of the task's kernel stack
68572 + - top of the task's userland stack
68573 + - base address for mmap() requests that do not specify one
68574 + (this includes all libraries)
68575 + - base address of the main executable
68576 +
68577 + It is strongly recommended to say Y here as address space layout
68578 + randomization has negligible impact on performance yet it provides
68579 + a very effective protection.
68580 +
68581 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68582 + this feature on a per file basis.
68583 +
68584 +config PAX_RANDKSTACK
68585 + bool "Randomize kernel stack base"
68586 + depends on PAX_ASLR && X86_TSC && X86
68587 + help
68588 + By saying Y here the kernel will randomize every task's kernel
68589 + stack on every system call. This will not only force an attacker
68590 + to guess it but also prevent him from making use of possible
68591 + leaked information about it.
68592 +
68593 + Since the kernel stack is a rather scarce resource, randomization
68594 + may cause unexpected stack overflows, therefore you should very
68595 + carefully test your system. Note that once enabled in the kernel
68596 + configuration, this feature cannot be disabled on a per file basis.
68597 +
68598 +config PAX_RANDUSTACK
68599 + bool "Randomize user stack base"
68600 + depends on PAX_ASLR
68601 + help
68602 + By saying Y here the kernel will randomize every task's userland
68603 + stack. The randomization is done in two steps where the second
68604 + one may apply a big amount of shift to the top of the stack and
68605 + cause problems for programs that want to use lots of memory (more
68606 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
68607 + For this reason the second step can be controlled by 'chpax' or
68608 + 'paxctl' on a per file basis.
68609 +
68610 +config PAX_RANDMMAP
68611 + bool "Randomize mmap() base"
68612 + depends on PAX_ASLR
68613 + help
68614 + By saying Y here the kernel will use a randomized base address for
68615 + mmap() requests that do not specify one themselves. As a result
68616 + all dynamically loaded libraries will appear at random addresses
68617 + and therefore be harder to exploit by a technique where an attacker
68618 + attempts to execute library code for his purposes (e.g. spawn a
68619 + shell from an exploited program that is running at an elevated
68620 + privilege level).
68621 +
68622 + Furthermore, if a program is relinked as a dynamic ELF file, its
68623 + base address will be randomized as well, completing the full
68624 + randomization of the address space layout. Attacking such programs
68625 + becomes a guess game. You can find an example of doing this at
68626 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
68627 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
68628 +
68629 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
68630 + feature on a per file basis.
68631 +
68632 +endmenu
68633 +
68634 +menu "Miscellaneous hardening features"
68635 +
68636 +config PAX_MEMORY_SANITIZE
68637 + bool "Sanitize all freed memory"
68638 + help
68639 + By saying Y here the kernel will erase memory pages as soon as they
68640 + are freed. This in turn reduces the lifetime of data stored in the
68641 + pages, making it less likely that sensitive information such as
68642 + passwords, cryptographic secrets, etc stay in memory for too long.
68643 +
68644 + This is especially useful for programs whose runtime is short, long
68645 + lived processes and the kernel itself benefit from this as long as
68646 + they operate on whole memory pages and ensure timely freeing of pages
68647 + that may hold sensitive information.
68648 +
68649 + The tradeoff is performance impact, on a single CPU system kernel
68650 + compilation sees a 3% slowdown, other systems and workloads may vary
68651 + and you are advised to test this feature on your expected workload
68652 + before deploying it.
68653 +
68654 + Note that this feature does not protect data stored in live pages,
68655 + e.g., process memory swapped to disk may stay there for a long time.
68656 +
68657 +config PAX_MEMORY_STACKLEAK
68658 + bool "Sanitize kernel stack"
68659 + depends on X86
68660 + help
68661 + By saying Y here the kernel will erase the kernel stack before it
68662 + returns from a system call. This in turn reduces the information
68663 + that a kernel stack leak bug can reveal.
68664 +
68665 + Note that such a bug can still leak information that was put on
68666 + the stack by the current system call (the one eventually triggering
68667 + the bug) but traces of earlier system calls on the kernel stack
68668 + cannot leak anymore.
68669 +
68670 + The tradeoff is performance impact: on a single CPU system kernel
68671 + compilation sees a 1% slowdown, other systems and workloads may vary
68672 + and you are advised to test this feature on your expected workload
68673 + before deploying it.
68674 +
68675 + Note: full support for this feature requires gcc with plugin support
68676 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
68677 + is not supported). Using older gcc versions means that functions
68678 + with large enough stack frames may leave uninitialized memory behind
68679 + that may be exposed to a later syscall leaking the stack.
68680 +
68681 +config PAX_MEMORY_UDEREF
68682 + bool "Prevent invalid userland pointer dereference"
68683 + depends on X86 && !UML_X86 && !XEN
68684 + select PAX_PER_CPU_PGD if X86_64
68685 + help
68686 + By saying Y here the kernel will be prevented from dereferencing
68687 + userland pointers in contexts where the kernel expects only kernel
68688 + pointers. This is both a useful runtime debugging feature and a
68689 + security measure that prevents exploiting a class of kernel bugs.
68690 +
68691 + The tradeoff is that some virtualization solutions may experience
68692 + a huge slowdown and therefore you should not enable this feature
68693 + for kernels meant to run in such environments. Whether a given VM
68694 + solution is affected or not is best determined by simply trying it
68695 + out, the performance impact will be obvious right on boot as this
68696 + mechanism engages from very early on. A good rule of thumb is that
68697 + VMs running on CPUs without hardware virtualization support (i.e.,
68698 + the majority of IA-32 CPUs) will likely experience the slowdown.
68699 +
68700 +config PAX_REFCOUNT
68701 + bool "Prevent various kernel object reference counter overflows"
68702 + depends on GRKERNSEC && (X86 || SPARC64)
68703 + help
68704 + By saying Y here the kernel will detect and prevent overflowing
68705 + various (but not all) kinds of object reference counters. Such
68706 + overflows can normally occur due to bugs only and are often, if
68707 + not always, exploitable.
68708 +
68709 + The tradeoff is that data structures protected by an overflowed
68710 + refcount will never be freed and therefore will leak memory. Note
68711 + that this leak also happens even without this protection but in
68712 + that case the overflow can eventually trigger the freeing of the
68713 + data structure while it is still being used elsewhere, resulting
68714 + in the exploitable situation that this feature prevents.
68715 +
68716 + Since this has a negligible performance impact, you should enable
68717 + this feature.
68718 +
68719 +config PAX_USERCOPY
68720 + bool "Harden heap object copies between kernel and userland"
68721 + depends on X86 || PPC || SPARC || ARM
68722 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
68723 + help
68724 + By saying Y here the kernel will enforce the size of heap objects
68725 + when they are copied in either direction between the kernel and
68726 + userland, even if only a part of the heap object is copied.
68727 +
68728 + Specifically, this checking prevents information leaking from the
68729 + kernel heap during kernel to userland copies (if the kernel heap
68730 + object is otherwise fully initialized) and prevents kernel heap
68731 + overflows during userland to kernel copies.
68732 +
68733 + Note that the current implementation provides the strictest bounds
68734 + checks for the SLUB allocator.
68735 +
68736 + Enabling this option also enables per-slab cache protection against
68737 + data in a given cache being copied into/out of via userland
68738 + accessors. Though the whitelist of regions will be reduced over
68739 + time, it notably protects important data structures like task structs.
68740 +
68741 + If frame pointers are enabled on x86, this option will also restrict
68742 + copies into and out of the kernel stack to local variables within a
68743 + single frame.
68744 +
68745 + Since this has a negligible performance impact, you should enable
68746 + this feature.
68747 +
68748 +endmenu
68749 +
68750 +endmenu
68751 +
68752 config KEYS
68753 bool "Enable access key retention support"
68754 help
68755 @@ -167,7 +715,7 @@ config INTEL_TXT
68756 config LSM_MMAP_MIN_ADDR
68757 int "Low address space for LSM to protect from user allocation"
68758 depends on SECURITY && SECURITY_SELINUX
68759 - default 32768 if ARM
68760 + default 32768 if ALPHA || ARM || PARISC || SPARC32
68761 default 65536
68762 help
68763 This is the portion of low virtual memory which should be protected
68764 diff -urNp linux-3.0.3/security/keys/keyring.c linux-3.0.3/security/keys/keyring.c
68765 --- linux-3.0.3/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
68766 +++ linux-3.0.3/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
68767 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
68768 ret = -EFAULT;
68769
68770 for (loop = 0; loop < klist->nkeys; loop++) {
68771 + key_serial_t serial;
68772 key = klist->keys[loop];
68773 + serial = key->serial;
68774
68775 tmp = sizeof(key_serial_t);
68776 if (tmp > buflen)
68777 tmp = buflen;
68778
68779 - if (copy_to_user(buffer,
68780 - &key->serial,
68781 - tmp) != 0)
68782 + if (copy_to_user(buffer, &serial, tmp))
68783 goto error;
68784
68785 buflen -= tmp;
68786 diff -urNp linux-3.0.3/security/min_addr.c linux-3.0.3/security/min_addr.c
68787 --- linux-3.0.3/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
68788 +++ linux-3.0.3/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
68789 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
68790 */
68791 static void update_mmap_min_addr(void)
68792 {
68793 +#ifndef SPARC
68794 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
68795 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
68796 mmap_min_addr = dac_mmap_min_addr;
68797 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
68798 #else
68799 mmap_min_addr = dac_mmap_min_addr;
68800 #endif
68801 +#endif
68802 }
68803
68804 /*
68805 diff -urNp linux-3.0.3/security/security.c linux-3.0.3/security/security.c
68806 --- linux-3.0.3/security/security.c 2011-07-21 22:17:23.000000000 -0400
68807 +++ linux-3.0.3/security/security.c 2011-08-23 21:48:14.000000000 -0400
68808 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
68809 /* things that live in capability.c */
68810 extern void __init security_fixup_ops(struct security_operations *ops);
68811
68812 -static struct security_operations *security_ops;
68813 -static struct security_operations default_security_ops = {
68814 +static struct security_operations *security_ops __read_only;
68815 +static struct security_operations default_security_ops __read_only = {
68816 .name = "default",
68817 };
68818
68819 @@ -67,7 +67,9 @@ int __init security_init(void)
68820
68821 void reset_security_ops(void)
68822 {
68823 + pax_open_kernel();
68824 security_ops = &default_security_ops;
68825 + pax_close_kernel();
68826 }
68827
68828 /* Save user chosen LSM */
68829 diff -urNp linux-3.0.3/security/selinux/hooks.c linux-3.0.3/security/selinux/hooks.c
68830 --- linux-3.0.3/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
68831 +++ linux-3.0.3/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
68832 @@ -93,7 +93,6 @@
68833 #define NUM_SEL_MNT_OPTS 5
68834
68835 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
68836 -extern struct security_operations *security_ops;
68837
68838 /* SECMARK reference count */
68839 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
68840 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
68841
68842 #endif
68843
68844 -static struct security_operations selinux_ops = {
68845 +static struct security_operations selinux_ops __read_only = {
68846 .name = "selinux",
68847
68848 .ptrace_access_check = selinux_ptrace_access_check,
68849 diff -urNp linux-3.0.3/security/selinux/include/xfrm.h linux-3.0.3/security/selinux/include/xfrm.h
68850 --- linux-3.0.3/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
68851 +++ linux-3.0.3/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
68852 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
68853
68854 static inline void selinux_xfrm_notify_policyload(void)
68855 {
68856 - atomic_inc(&flow_cache_genid);
68857 + atomic_inc_unchecked(&flow_cache_genid);
68858 }
68859 #else
68860 static inline int selinux_xfrm_enabled(void)
68861 diff -urNp linux-3.0.3/security/selinux/ss/services.c linux-3.0.3/security/selinux/ss/services.c
68862 --- linux-3.0.3/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
68863 +++ linux-3.0.3/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
68864 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
68865 int rc = 0;
68866 struct policy_file file = { data, len }, *fp = &file;
68867
68868 + pax_track_stack();
68869 +
68870 if (!ss_initialized) {
68871 avtab_cache_init();
68872 rc = policydb_read(&policydb, fp);
68873 diff -urNp linux-3.0.3/security/smack/smack_lsm.c linux-3.0.3/security/smack/smack_lsm.c
68874 --- linux-3.0.3/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
68875 +++ linux-3.0.3/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
68876 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
68877 return 0;
68878 }
68879
68880 -struct security_operations smack_ops = {
68881 +struct security_operations smack_ops __read_only = {
68882 .name = "smack",
68883
68884 .ptrace_access_check = smack_ptrace_access_check,
68885 diff -urNp linux-3.0.3/security/tomoyo/tomoyo.c linux-3.0.3/security/tomoyo/tomoyo.c
68886 --- linux-3.0.3/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
68887 +++ linux-3.0.3/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
68888 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
68889 * tomoyo_security_ops is a "struct security_operations" which is used for
68890 * registering TOMOYO.
68891 */
68892 -static struct security_operations tomoyo_security_ops = {
68893 +static struct security_operations tomoyo_security_ops __read_only = {
68894 .name = "tomoyo",
68895 .cred_alloc_blank = tomoyo_cred_alloc_blank,
68896 .cred_prepare = tomoyo_cred_prepare,
68897 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.c linux-3.0.3/sound/aoa/codecs/onyx.c
68898 --- linux-3.0.3/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
68899 +++ linux-3.0.3/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
68900 @@ -54,7 +54,7 @@ struct onyx {
68901 spdif_locked:1,
68902 analog_locked:1,
68903 original_mute:2;
68904 - int open_count;
68905 + local_t open_count;
68906 struct codec_info *codec_info;
68907
68908 /* mutex serializes concurrent access to the device
68909 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
68910 struct onyx *onyx = cii->codec_data;
68911
68912 mutex_lock(&onyx->mutex);
68913 - onyx->open_count++;
68914 + local_inc(&onyx->open_count);
68915 mutex_unlock(&onyx->mutex);
68916
68917 return 0;
68918 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
68919 struct onyx *onyx = cii->codec_data;
68920
68921 mutex_lock(&onyx->mutex);
68922 - onyx->open_count--;
68923 - if (!onyx->open_count)
68924 + if (local_dec_and_test(&onyx->open_count))
68925 onyx->spdif_locked = onyx->analog_locked = 0;
68926 mutex_unlock(&onyx->mutex);
68927
68928 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.h linux-3.0.3/sound/aoa/codecs/onyx.h
68929 --- linux-3.0.3/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
68930 +++ linux-3.0.3/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
68931 @@ -11,6 +11,7 @@
68932 #include <linux/i2c.h>
68933 #include <asm/pmac_low_i2c.h>
68934 #include <asm/prom.h>
68935 +#include <asm/local.h>
68936
68937 /* PCM3052 register definitions */
68938
68939 diff -urNp linux-3.0.3/sound/core/seq/seq_device.c linux-3.0.3/sound/core/seq/seq_device.c
68940 --- linux-3.0.3/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
68941 +++ linux-3.0.3/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
68942 @@ -63,7 +63,7 @@ struct ops_list {
68943 int argsize; /* argument size */
68944
68945 /* operators */
68946 - struct snd_seq_dev_ops ops;
68947 + struct snd_seq_dev_ops *ops;
68948
68949 /* registred devices */
68950 struct list_head dev_list; /* list of devices */
68951 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
68952
68953 mutex_lock(&ops->reg_mutex);
68954 /* copy driver operators */
68955 - ops->ops = *entry;
68956 + ops->ops = entry;
68957 ops->driver |= DRIVER_LOADED;
68958 ops->argsize = argsize;
68959
68960 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
68961 dev->name, ops->id, ops->argsize, dev->argsize);
68962 return -EINVAL;
68963 }
68964 - if (ops->ops.init_device(dev) >= 0) {
68965 + if (ops->ops->init_device(dev) >= 0) {
68966 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
68967 ops->num_init_devices++;
68968 } else {
68969 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
68970 dev->name, ops->id, ops->argsize, dev->argsize);
68971 return -EINVAL;
68972 }
68973 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
68974 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
68975 dev->status = SNDRV_SEQ_DEVICE_FREE;
68976 dev->driver_data = NULL;
68977 ops->num_init_devices--;
68978 diff -urNp linux-3.0.3/sound/drivers/mts64.c linux-3.0.3/sound/drivers/mts64.c
68979 --- linux-3.0.3/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
68980 +++ linux-3.0.3/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
68981 @@ -28,6 +28,7 @@
68982 #include <sound/initval.h>
68983 #include <sound/rawmidi.h>
68984 #include <sound/control.h>
68985 +#include <asm/local.h>
68986
68987 #define CARD_NAME "Miditerminal 4140"
68988 #define DRIVER_NAME "MTS64"
68989 @@ -66,7 +67,7 @@ struct mts64 {
68990 struct pardevice *pardev;
68991 int pardev_claimed;
68992
68993 - int open_count;
68994 + local_t open_count;
68995 int current_midi_output_port;
68996 int current_midi_input_port;
68997 u8 mode[MTS64_NUM_INPUT_PORTS];
68998 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
68999 {
69000 struct mts64 *mts = substream->rmidi->private_data;
69001
69002 - if (mts->open_count == 0) {
69003 + if (local_read(&mts->open_count) == 0) {
69004 /* We don't need a spinlock here, because this is just called
69005 if the device has not been opened before.
69006 So there aren't any IRQs from the device */
69007 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69008
69009 msleep(50);
69010 }
69011 - ++(mts->open_count);
69012 + local_inc(&mts->open_count);
69013
69014 return 0;
69015 }
69016 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69017 struct mts64 *mts = substream->rmidi->private_data;
69018 unsigned long flags;
69019
69020 - --(mts->open_count);
69021 - if (mts->open_count == 0) {
69022 + if (local_dec_return(&mts->open_count) == 0) {
69023 /* We need the spinlock_irqsave here because we can still
69024 have IRQs at this point */
69025 spin_lock_irqsave(&mts->lock, flags);
69026 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69027
69028 msleep(500);
69029
69030 - } else if (mts->open_count < 0)
69031 - mts->open_count = 0;
69032 + } else if (local_read(&mts->open_count) < 0)
69033 + local_set(&mts->open_count, 0);
69034
69035 return 0;
69036 }
69037 diff -urNp linux-3.0.3/sound/drivers/opl4/opl4_lib.c linux-3.0.3/sound/drivers/opl4/opl4_lib.c
69038 --- linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
69039 +++ linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
69040 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69041 MODULE_DESCRIPTION("OPL4 driver");
69042 MODULE_LICENSE("GPL");
69043
69044 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69045 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69046 {
69047 int timeout = 10;
69048 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69049 diff -urNp linux-3.0.3/sound/drivers/portman2x4.c linux-3.0.3/sound/drivers/portman2x4.c
69050 --- linux-3.0.3/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
69051 +++ linux-3.0.3/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
69052 @@ -47,6 +47,7 @@
69053 #include <sound/initval.h>
69054 #include <sound/rawmidi.h>
69055 #include <sound/control.h>
69056 +#include <asm/local.h>
69057
69058 #define CARD_NAME "Portman 2x4"
69059 #define DRIVER_NAME "portman"
69060 @@ -84,7 +85,7 @@ struct portman {
69061 struct pardevice *pardev;
69062 int pardev_claimed;
69063
69064 - int open_count;
69065 + local_t open_count;
69066 int mode[PORTMAN_NUM_INPUT_PORTS];
69067 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69068 };
69069 diff -urNp linux-3.0.3/sound/firewire/amdtp.c linux-3.0.3/sound/firewire/amdtp.c
69070 --- linux-3.0.3/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
69071 +++ linux-3.0.3/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
69072 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69073 ptr = s->pcm_buffer_pointer + data_blocks;
69074 if (ptr >= pcm->runtime->buffer_size)
69075 ptr -= pcm->runtime->buffer_size;
69076 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69077 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69078
69079 s->pcm_period_pointer += data_blocks;
69080 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69081 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69082 */
69083 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69084 {
69085 - ACCESS_ONCE(s->source_node_id_field) =
69086 + ACCESS_ONCE_RW(s->source_node_id_field) =
69087 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69088 }
69089 EXPORT_SYMBOL(amdtp_out_stream_update);
69090 diff -urNp linux-3.0.3/sound/firewire/amdtp.h linux-3.0.3/sound/firewire/amdtp.h
69091 --- linux-3.0.3/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
69092 +++ linux-3.0.3/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
69093 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69094 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69095 struct snd_pcm_substream *pcm)
69096 {
69097 - ACCESS_ONCE(s->pcm) = pcm;
69098 + ACCESS_ONCE_RW(s->pcm) = pcm;
69099 }
69100
69101 /**
69102 diff -urNp linux-3.0.3/sound/firewire/isight.c linux-3.0.3/sound/firewire/isight.c
69103 --- linux-3.0.3/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
69104 +++ linux-3.0.3/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
69105 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
69106 ptr += count;
69107 if (ptr >= runtime->buffer_size)
69108 ptr -= runtime->buffer_size;
69109 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
69110 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
69111
69112 isight->period_counter += count;
69113 if (isight->period_counter >= runtime->period_size) {
69114 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
69115 if (err < 0)
69116 return err;
69117
69118 - ACCESS_ONCE(isight->pcm_active) = true;
69119 + ACCESS_ONCE_RW(isight->pcm_active) = true;
69120
69121 return 0;
69122 }
69123 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
69124 {
69125 struct isight *isight = substream->private_data;
69126
69127 - ACCESS_ONCE(isight->pcm_active) = false;
69128 + ACCESS_ONCE_RW(isight->pcm_active) = false;
69129
69130 mutex_lock(&isight->mutex);
69131 isight_stop_streaming(isight);
69132 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
69133
69134 switch (cmd) {
69135 case SNDRV_PCM_TRIGGER_START:
69136 - ACCESS_ONCE(isight->pcm_running) = true;
69137 + ACCESS_ONCE_RW(isight->pcm_running) = true;
69138 break;
69139 case SNDRV_PCM_TRIGGER_STOP:
69140 - ACCESS_ONCE(isight->pcm_running) = false;
69141 + ACCESS_ONCE_RW(isight->pcm_running) = false;
69142 break;
69143 default:
69144 return -EINVAL;
69145 diff -urNp linux-3.0.3/sound/isa/cmi8330.c linux-3.0.3/sound/isa/cmi8330.c
69146 --- linux-3.0.3/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
69147 +++ linux-3.0.3/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
69148 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69149
69150 struct snd_pcm *pcm;
69151 struct snd_cmi8330_stream {
69152 - struct snd_pcm_ops ops;
69153 + snd_pcm_ops_no_const ops;
69154 snd_pcm_open_callback_t open;
69155 void *private_data; /* sb or wss */
69156 } streams[2];
69157 diff -urNp linux-3.0.3/sound/oss/sb_audio.c linux-3.0.3/sound/oss/sb_audio.c
69158 --- linux-3.0.3/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
69159 +++ linux-3.0.3/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
69160 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69161 buf16 = (signed short *)(localbuf + localoffs);
69162 while (c)
69163 {
69164 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69165 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69166 if (copy_from_user(lbuf8,
69167 userbuf+useroffs + p,
69168 locallen))
69169 diff -urNp linux-3.0.3/sound/oss/swarm_cs4297a.c linux-3.0.3/sound/oss/swarm_cs4297a.c
69170 --- linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
69171 +++ linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
69172 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69173 {
69174 struct cs4297a_state *s;
69175 u32 pwr, id;
69176 - mm_segment_t fs;
69177 int rval;
69178 #ifndef CONFIG_BCM_CS4297A_CSWARM
69179 u64 cfg;
69180 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69181 if (!rval) {
69182 char *sb1250_duart_present;
69183
69184 +#if 0
69185 + mm_segment_t fs;
69186 fs = get_fs();
69187 set_fs(KERNEL_DS);
69188 -#if 0
69189 val = SOUND_MASK_LINE;
69190 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69191 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69192 val = initvol[i].vol;
69193 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69194 }
69195 + set_fs(fs);
69196 // cs4297a_write_ac97(s, 0x18, 0x0808);
69197 #else
69198 // cs4297a_write_ac97(s, 0x5e, 0x180);
69199 cs4297a_write_ac97(s, 0x02, 0x0808);
69200 cs4297a_write_ac97(s, 0x18, 0x0808);
69201 #endif
69202 - set_fs(fs);
69203
69204 list_add(&s->list, &cs4297a_devs);
69205
69206 diff -urNp linux-3.0.3/sound/pci/hda/hda_codec.h linux-3.0.3/sound/pci/hda/hda_codec.h
69207 --- linux-3.0.3/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
69208 +++ linux-3.0.3/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
69209 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69210 /* notify power-up/down from codec to controller */
69211 void (*pm_notify)(struct hda_bus *bus);
69212 #endif
69213 -};
69214 +} __no_const;
69215
69216 /* template to pass to the bus constructor */
69217 struct hda_bus_template {
69218 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69219 #endif
69220 void (*reboot_notify)(struct hda_codec *codec);
69221 };
69222 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69223
69224 /* record for amp information cache */
69225 struct hda_cache_head {
69226 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69227 struct snd_pcm_substream *substream);
69228 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69229 struct snd_pcm_substream *substream);
69230 -};
69231 +} __no_const;
69232
69233 /* PCM information for each substream */
69234 struct hda_pcm_stream {
69235 @@ -801,7 +802,7 @@ struct hda_codec {
69236 const char *modelname; /* model name for preset */
69237
69238 /* set by patch */
69239 - struct hda_codec_ops patch_ops;
69240 + hda_codec_ops_no_const patch_ops;
69241
69242 /* PCM to create, set by patch_ops.build_pcms callback */
69243 unsigned int num_pcms;
69244 diff -urNp linux-3.0.3/sound/pci/ice1712/ice1712.h linux-3.0.3/sound/pci/ice1712/ice1712.h
69245 --- linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
69246 +++ linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
69247 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69248 unsigned int mask_flags; /* total mask bits */
69249 struct snd_akm4xxx_ops {
69250 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69251 - } ops;
69252 + } __no_const ops;
69253 };
69254
69255 struct snd_ice1712_spdif {
69256 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69257 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69258 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69259 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69260 - } ops;
69261 + } __no_const ops;
69262 };
69263
69264
69265 diff -urNp linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c
69266 --- linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
69267 +++ linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
69268 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69269 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69270 break;
69271 }
69272 - if (atomic_read(&chip->interrupt_sleep_count)) {
69273 - atomic_set(&chip->interrupt_sleep_count, 0);
69274 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69275 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69276 wake_up(&chip->interrupt_sleep);
69277 }
69278 __end:
69279 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69280 continue;
69281 init_waitqueue_entry(&wait, current);
69282 add_wait_queue(&chip->interrupt_sleep, &wait);
69283 - atomic_inc(&chip->interrupt_sleep_count);
69284 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69285 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69286 remove_wait_queue(&chip->interrupt_sleep, &wait);
69287 }
69288 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69289 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69290 spin_unlock(&chip->reg_lock);
69291
69292 - if (atomic_read(&chip->interrupt_sleep_count)) {
69293 - atomic_set(&chip->interrupt_sleep_count, 0);
69294 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69295 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69296 wake_up(&chip->interrupt_sleep);
69297 }
69298 }
69299 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69300 spin_lock_init(&chip->reg_lock);
69301 spin_lock_init(&chip->voice_lock);
69302 init_waitqueue_head(&chip->interrupt_sleep);
69303 - atomic_set(&chip->interrupt_sleep_count, 0);
69304 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69305 chip->card = card;
69306 chip->pci = pci;
69307 chip->irq = -1;
69308 diff -urNp linux-3.0.3/sound/soc/soc-core.c linux-3.0.3/sound/soc/soc-core.c
69309 --- linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:44:40.000000000 -0400
69310 +++ linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
69311 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
69312 }
69313
69314 /* ASoC PCM operations */
69315 -static struct snd_pcm_ops soc_pcm_ops = {
69316 +static snd_pcm_ops_no_const soc_pcm_ops = {
69317 .open = soc_pcm_open,
69318 .close = soc_codec_close,
69319 .hw_params = soc_pcm_hw_params,
69320 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
69321 rtd->pcm = pcm;
69322 pcm->private_data = rtd;
69323 if (platform->driver->ops) {
69324 + /* this whole logic is broken... */
69325 soc_pcm_ops.mmap = platform->driver->ops->mmap;
69326 soc_pcm_ops.pointer = platform->driver->ops->pointer;
69327 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
69328 diff -urNp linux-3.0.3/sound/usb/card.h linux-3.0.3/sound/usb/card.h
69329 --- linux-3.0.3/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
69330 +++ linux-3.0.3/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
69331 @@ -44,6 +44,7 @@ struct snd_urb_ops {
69332 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69333 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69334 };
69335 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
69336
69337 struct snd_usb_substream {
69338 struct snd_usb_stream *stream;
69339 @@ -93,7 +94,7 @@ struct snd_usb_substream {
69340 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
69341 spinlock_t lock;
69342
69343 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
69344 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
69345 };
69346
69347 struct snd_usb_stream {
69348 diff -urNp linux-3.0.3/tools/gcc/constify_plugin.c linux-3.0.3/tools/gcc/constify_plugin.c
69349 --- linux-3.0.3/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
69350 +++ linux-3.0.3/tools/gcc/constify_plugin.c 2011-08-23 22:35:28.000000000 -0400
69351 @@ -0,0 +1,258 @@
69352 +/*
69353 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
69354 + * Licensed under the GPL v2, or (at your option) v3
69355 + *
69356 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
69357 + *
69358 + * Usage:
69359 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
69360 + * $ gcc -fplugin=constify_plugin.so test.c -O2
69361 + */
69362 +
69363 +#include "gcc-plugin.h"
69364 +#include "config.h"
69365 +#include "system.h"
69366 +#include "coretypes.h"
69367 +#include "tree.h"
69368 +#include "tree-pass.h"
69369 +#include "intl.h"
69370 +#include "plugin-version.h"
69371 +#include "tm.h"
69372 +#include "toplev.h"
69373 +#include "function.h"
69374 +#include "tree-flow.h"
69375 +#include "plugin.h"
69376 +
69377 +int plugin_is_GPL_compatible;
69378 +
69379 +static struct plugin_info const_plugin_info = {
69380 + .version = "20110817",
69381 + .help = "no-constify\tturn off constification\n",
69382 +};
69383 +
69384 +static bool walk_struct(tree node);
69385 +
69386 +static void deconstify_node(tree node)
69387 +{
69388 + tree field;
69389 +
69390 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
69391 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
69392 + if (code == RECORD_TYPE || code == UNION_TYPE)
69393 + deconstify_node(TREE_TYPE(field));
69394 + TREE_READONLY(field) = 0;
69395 + TREE_READONLY(TREE_TYPE(field)) = 0;
69396 + }
69397 +}
69398 +
69399 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69400 +{
69401 + tree type;
69402 +
69403 + *no_add_attrs = true;
69404 + if (TREE_CODE(*node) == FUNCTION_DECL) {
69405 + error("%qE attribute does not apply to functions", name);
69406 + return NULL_TREE;
69407 + }
69408 +
69409 + if (TREE_CODE(*node) == VAR_DECL) {
69410 + error("%qE attribute does not apply to variables", name);
69411 + return NULL_TREE;
69412 + }
69413 +
69414 + if (!DECL_P(*node)) {
69415 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
69416 + *no_add_attrs = false;
69417 + else
69418 + error("%qE attribute applies to struct and union types only", name);
69419 + return NULL_TREE;
69420 + }
69421 +
69422 + type = TREE_TYPE(*node);
69423 +
69424 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
69425 + error("%qE attribute applies to struct and union types only", name);
69426 + return NULL_TREE;
69427 + }
69428 +
69429 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
69430 + error("%qE attribute is already applied to the type", name);
69431 + return NULL_TREE;
69432 + }
69433 +
69434 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(type)) {
69435 + error("%qE attribute used on type that is not constified", name);
69436 + return NULL_TREE;
69437 + }
69438 +
69439 + if (TREE_CODE(*node) == TYPE_DECL) {
69440 + TREE_TYPE(*node) = build_qualified_type(type, TYPE_QUALS(type) & ~TYPE_QUAL_CONST);
69441 + TYPE_FIELDS(TREE_TYPE(*node)) = copy_list(TYPE_FIELDS(TREE_TYPE(*node)));
69442 + deconstify_node(TREE_TYPE(*node));
69443 + return NULL_TREE;
69444 + }
69445 +
69446 + return NULL_TREE;
69447 +}
69448 +
69449 +static struct attribute_spec no_const_attr = {
69450 + .name = "no_const",
69451 + .min_length = 0,
69452 + .max_length = 0,
69453 + .decl_required = false,
69454 + .type_required = false,
69455 + .function_type_required = false,
69456 + .handler = handle_no_const_attribute
69457 +};
69458 +
69459 +static void register_attributes(void *event_data, void *data)
69460 +{
69461 + register_attribute(&no_const_attr);
69462 +}
69463 +
69464 +static void constify_node(tree node)
69465 +{
69466 + TREE_READONLY(node) = 1;
69467 +}
69468 +
69469 +static bool is_fptr(tree field)
69470 +{
69471 + tree ptr = TREE_TYPE(field);
69472 +
69473 + if (TREE_CODE(ptr) != POINTER_TYPE)
69474 + return false;
69475 +
69476 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
69477 +}
69478 +
69479 +static bool walk_struct(tree node)
69480 +{
69481 + tree field;
69482 +
69483 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
69484 + return false;
69485 +
69486 + if (TYPE_FIELDS(node) == NULL_TREE)
69487 + return false;
69488 +
69489 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
69490 + tree type = TREE_TYPE(field);
69491 + enum tree_code code = TREE_CODE(type);
69492 + if (code == RECORD_TYPE || code == UNION_TYPE) {
69493 + if (!(walk_struct(type)))
69494 + return false;
69495 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
69496 + return false;
69497 + }
69498 + return true;
69499 +}
69500 +
69501 +static void finish_type(void *event_data, void *data)
69502 +{
69503 + tree node = (tree)event_data;
69504 +
69505 + if (node == NULL_TREE)
69506 + return;
69507 +
69508 + if (TREE_READONLY(node))
69509 + return;
69510 +
69511 + if (TYPE_FIELDS(node) == NULL_TREE)
69512 + return;
69513 +
69514 + if (walk_struct(node))
69515 + constify_node(node);
69516 +}
69517 +
69518 +static unsigned int check_local_variables(void);
69519 +
69520 +struct gimple_opt_pass pass_local_variable = {
69521 + {
69522 + .type = GIMPLE_PASS,
69523 + .name = "check_local_variables",
69524 + .gate = NULL,
69525 + .execute = check_local_variables,
69526 + .sub = NULL,
69527 + .next = NULL,
69528 + .static_pass_number = 0,
69529 + .tv_id = TV_NONE,
69530 + .properties_required = 0,
69531 + .properties_provided = 0,
69532 + .properties_destroyed = 0,
69533 + .todo_flags_start = 0,
69534 + .todo_flags_finish = 0
69535 + }
69536 +};
69537 +
69538 +static unsigned int check_local_variables(void)
69539 +{
69540 + tree var;
69541 + referenced_var_iterator rvi;
69542 +
69543 +#if __GNUC_MINOR__ >= 6
69544 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
69545 +#else
69546 + FOR_EACH_REFERENCED_VAR(var, rvi) {
69547 +#endif
69548 + tree type = TREE_TYPE(var);
69549 +
69550 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
69551 + continue;
69552 +
69553 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
69554 + continue;
69555 +
69556 + if (!TREE_READONLY(type))
69557 + continue;
69558 +
69559 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
69560 +// continue;
69561 +
69562 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
69563 +// continue;
69564 +
69565 + if (walk_struct(type)) {
69566 + error("constified variable %qE cannot be local", var);
69567 + return 1;
69568 + }
69569 + }
69570 + return 0;
69571 +}
69572 +
69573 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
69574 +{
69575 + const char * const plugin_name = plugin_info->base_name;
69576 + const int argc = plugin_info->argc;
69577 + const struct plugin_argument * const argv = plugin_info->argv;
69578 + int i;
69579 + bool constify = true;
69580 +
69581 + struct register_pass_info local_variable_pass_info = {
69582 + .pass = &pass_local_variable.pass,
69583 + .reference_pass_name = "*referenced_vars",
69584 + .ref_pass_instance_number = 0,
69585 + .pos_op = PASS_POS_INSERT_AFTER
69586 + };
69587 +
69588 + if (!plugin_default_version_check(version, &gcc_version)) {
69589 + error(G_("incompatible gcc/plugin versions"));
69590 + return 1;
69591 + }
69592 +
69593 + for (i = 0; i < argc; ++i) {
69594 + if (!(strcmp(argv[i].key, "no-constify"))) {
69595 + constify = false;
69596 + continue;
69597 + }
69598 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69599 + }
69600 +
69601 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
69602 + if (constify) {
69603 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
69604 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
69605 + }
69606 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
69607 +
69608 + return 0;
69609 +}
69610 diff -urNp linux-3.0.3/tools/gcc/Makefile linux-3.0.3/tools/gcc/Makefile
69611 --- linux-3.0.3/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
69612 +++ linux-3.0.3/tools/gcc/Makefile 2011-08-23 21:47:56.000000000 -0400
69613 @@ -0,0 +1,12 @@
69614 +#CC := gcc
69615 +#PLUGIN_SOURCE_FILES := pax_plugin.c
69616 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
69617 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
69618 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
69619 +
69620 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
69621 +
69622 +hostlibs-y := stackleak_plugin.so constify_plugin.so
69623 +always := $(hostlibs-y)
69624 +stackleak_plugin-objs := stackleak_plugin.o
69625 +constify_plugin-objs := constify_plugin.o
69626 diff -urNp linux-3.0.3/tools/gcc/stackleak_plugin.c linux-3.0.3/tools/gcc/stackleak_plugin.c
69627 --- linux-3.0.3/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
69628 +++ linux-3.0.3/tools/gcc/stackleak_plugin.c 2011-08-23 21:47:56.000000000 -0400
69629 @@ -0,0 +1,243 @@
69630 +/*
69631 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
69632 + * Licensed under the GPL v2
69633 + *
69634 + * Note: the choice of the license means that the compilation process is
69635 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
69636 + * but for the kernel it doesn't matter since it doesn't link against
69637 + * any of the gcc libraries
69638 + *
69639 + * gcc plugin to help implement various PaX features
69640 + *
69641 + * - track lowest stack pointer
69642 + *
69643 + * TODO:
69644 + * - initialize all local variables
69645 + *
69646 + * BUGS:
69647 + * - cloned functions are instrumented twice
69648 + */
69649 +#include "gcc-plugin.h"
69650 +#include "config.h"
69651 +#include "system.h"
69652 +#include "coretypes.h"
69653 +#include "tree.h"
69654 +#include "tree-pass.h"
69655 +#include "intl.h"
69656 +#include "plugin-version.h"
69657 +#include "tm.h"
69658 +#include "toplev.h"
69659 +#include "basic-block.h"
69660 +#include "gimple.h"
69661 +//#include "expr.h" where are you...
69662 +#include "diagnostic.h"
69663 +#include "rtl.h"
69664 +#include "emit-rtl.h"
69665 +#include "function.h"
69666 +
69667 +int plugin_is_GPL_compatible;
69668 +
69669 +static int track_frame_size = -1;
69670 +static const char track_function[] = "pax_track_stack";
69671 +static bool init_locals;
69672 +
69673 +static struct plugin_info stackleak_plugin_info = {
69674 + .version = "201106030000",
69675 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
69676 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
69677 +};
69678 +
69679 +static bool gate_stackleak_track_stack(void);
69680 +static unsigned int execute_stackleak_tree_instrument(void);
69681 +static unsigned int execute_stackleak_final(void);
69682 +
69683 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
69684 + .pass = {
69685 + .type = GIMPLE_PASS,
69686 + .name = "stackleak_tree_instrument",
69687 + .gate = gate_stackleak_track_stack,
69688 + .execute = execute_stackleak_tree_instrument,
69689 + .sub = NULL,
69690 + .next = NULL,
69691 + .static_pass_number = 0,
69692 + .tv_id = TV_NONE,
69693 + .properties_required = PROP_gimple_leh | PROP_cfg,
69694 + .properties_provided = 0,
69695 + .properties_destroyed = 0,
69696 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
69697 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
69698 + }
69699 +};
69700 +
69701 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
69702 + .pass = {
69703 + .type = RTL_PASS,
69704 + .name = "stackleak_final",
69705 + .gate = gate_stackleak_track_stack,
69706 + .execute = execute_stackleak_final,
69707 + .sub = NULL,
69708 + .next = NULL,
69709 + .static_pass_number = 0,
69710 + .tv_id = TV_NONE,
69711 + .properties_required = 0,
69712 + .properties_provided = 0,
69713 + .properties_destroyed = 0,
69714 + .todo_flags_start = 0,
69715 + .todo_flags_finish = 0
69716 + }
69717 +};
69718 +
69719 +static bool gate_stackleak_track_stack(void)
69720 +{
69721 + return track_frame_size >= 0;
69722 +}
69723 +
69724 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
69725 +{
69726 + gimple call;
69727 + tree decl, type;
69728 +
69729 + // insert call to void pax_track_stack(void)
69730 + type = build_function_type_list(void_type_node, NULL_TREE);
69731 + decl = build_fn_decl(track_function, type);
69732 + DECL_ASSEMBLER_NAME(decl); // for LTO
69733 + call = gimple_build_call(decl, 0);
69734 + if (before)
69735 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
69736 + else
69737 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
69738 +}
69739 +
69740 +static unsigned int execute_stackleak_tree_instrument(void)
69741 +{
69742 + basic_block bb;
69743 + gimple_stmt_iterator gsi;
69744 +
69745 + // 1. loop through BBs and GIMPLE statements
69746 + FOR_EACH_BB(bb) {
69747 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
69748 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
69749 + tree decl;
69750 + gimple stmt = gsi_stmt(gsi);
69751 +
69752 + if (!is_gimple_call(stmt))
69753 + continue;
69754 + decl = gimple_call_fndecl(stmt);
69755 + if (!decl)
69756 + continue;
69757 + if (TREE_CODE(decl) != FUNCTION_DECL)
69758 + continue;
69759 + if (!DECL_BUILT_IN(decl))
69760 + continue;
69761 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
69762 + continue;
69763 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
69764 + continue;
69765 +
69766 + // 2. insert track call after each __builtin_alloca call
69767 + stackleak_add_instrumentation(&gsi, false);
69768 +// print_node(stderr, "pax", decl, 4);
69769 + }
69770 + }
69771 +
69772 + // 3. insert track call at the beginning
69773 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
69774 + gsi = gsi_start_bb(bb);
69775 + stackleak_add_instrumentation(&gsi, true);
69776 +
69777 + return 0;
69778 +}
69779 +
69780 +static unsigned int execute_stackleak_final(void)
69781 +{
69782 + rtx insn;
69783 +
69784 + if (cfun->calls_alloca)
69785 + return 0;
69786 +
69787 + // 1. find pax_track_stack calls
69788 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
69789 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
69790 + rtx body;
69791 +
69792 + if (!CALL_P(insn))
69793 + continue;
69794 + body = PATTERN(insn);
69795 + if (GET_CODE(body) != CALL)
69796 + continue;
69797 + body = XEXP(body, 0);
69798 + if (GET_CODE(body) != MEM)
69799 + continue;
69800 + body = XEXP(body, 0);
69801 + if (GET_CODE(body) != SYMBOL_REF)
69802 + continue;
69803 + if (strcmp(XSTR(body, 0), track_function))
69804 + continue;
69805 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
69806 + // 2. delete call if function frame is not big enough
69807 + if (get_frame_size() >= track_frame_size)
69808 + continue;
69809 + delete_insn_and_edges(insn);
69810 + }
69811 +
69812 +// print_simple_rtl(stderr, get_insns());
69813 +// print_rtl(stderr, get_insns());
69814 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
69815 +
69816 + return 0;
69817 +}
69818 +
69819 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
69820 +{
69821 + const char * const plugin_name = plugin_info->base_name;
69822 + const int argc = plugin_info->argc;
69823 + const struct plugin_argument * const argv = plugin_info->argv;
69824 + int i;
69825 + struct register_pass_info stackleak_tree_instrument_pass_info = {
69826 + .pass = &stackleak_tree_instrument_pass.pass,
69827 +// .reference_pass_name = "tree_profile",
69828 + .reference_pass_name = "optimized",
69829 + .ref_pass_instance_number = 0,
69830 + .pos_op = PASS_POS_INSERT_AFTER
69831 + };
69832 + struct register_pass_info stackleak_final_pass_info = {
69833 + .pass = &stackleak_final_rtl_opt_pass.pass,
69834 + .reference_pass_name = "final",
69835 + .ref_pass_instance_number = 0,
69836 + .pos_op = PASS_POS_INSERT_BEFORE
69837 + };
69838 +
69839 + if (!plugin_default_version_check(version, &gcc_version)) {
69840 + error(G_("incompatible gcc/plugin versions"));
69841 + return 1;
69842 + }
69843 +
69844 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
69845 +
69846 + for (i = 0; i < argc; ++i) {
69847 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
69848 + if (!argv[i].value) {
69849 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69850 + continue;
69851 + }
69852 + track_frame_size = atoi(argv[i].value);
69853 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
69854 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
69855 + continue;
69856 + }
69857 + if (!strcmp(argv[i].key, "initialize-locals")) {
69858 + if (argv[i].value) {
69859 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
69860 + continue;
69861 + }
69862 + init_locals = true;
69863 + continue;
69864 + }
69865 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69866 + }
69867 +
69868 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
69869 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
69870 +
69871 + return 0;
69872 +}
69873 diff -urNp linux-3.0.3/usr/gen_init_cpio.c linux-3.0.3/usr/gen_init_cpio.c
69874 --- linux-3.0.3/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
69875 +++ linux-3.0.3/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
69876 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
69877 int retval;
69878 int rc = -1;
69879 int namesize;
69880 - int i;
69881 + unsigned int i;
69882
69883 mode |= S_IFREG;
69884
69885 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
69886 *env_var = *expanded = '\0';
69887 strncat(env_var, start + 2, end - start - 2);
69888 strncat(expanded, new_location, start - new_location);
69889 - strncat(expanded, getenv(env_var), PATH_MAX);
69890 - strncat(expanded, end + 1, PATH_MAX);
69891 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
69892 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
69893 strncpy(new_location, expanded, PATH_MAX);
69894 + new_location[PATH_MAX] = 0;
69895 } else
69896 break;
69897 }
69898 diff -urNp linux-3.0.3/virt/kvm/kvm_main.c linux-3.0.3/virt/kvm/kvm_main.c
69899 --- linux-3.0.3/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
69900 +++ linux-3.0.3/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
69901 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
69902
69903 static cpumask_var_t cpus_hardware_enabled;
69904 static int kvm_usage_count = 0;
69905 -static atomic_t hardware_enable_failed;
69906 +static atomic_unchecked_t hardware_enable_failed;
69907
69908 struct kmem_cache *kvm_vcpu_cache;
69909 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
69910 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
69911
69912 if (r) {
69913 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
69914 - atomic_inc(&hardware_enable_failed);
69915 + atomic_inc_unchecked(&hardware_enable_failed);
69916 printk(KERN_INFO "kvm: enabling virtualization on "
69917 "CPU%d failed\n", cpu);
69918 }
69919 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
69920
69921 kvm_usage_count++;
69922 if (kvm_usage_count == 1) {
69923 - atomic_set(&hardware_enable_failed, 0);
69924 + atomic_set_unchecked(&hardware_enable_failed, 0);
69925 on_each_cpu(hardware_enable_nolock, NULL, 1);
69926
69927 - if (atomic_read(&hardware_enable_failed)) {
69928 + if (atomic_read_unchecked(&hardware_enable_failed)) {
69929 hardware_disable_all_nolock();
69930 r = -EBUSY;
69931 }
69932 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
69933 kvm_arch_vcpu_put(vcpu);
69934 }
69935
69936 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69937 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69938 struct module *module)
69939 {
69940 int r;
69941 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
69942 if (!vcpu_align)
69943 vcpu_align = __alignof__(struct kvm_vcpu);
69944 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
69945 - 0, NULL);
69946 + SLAB_USERCOPY, NULL);
69947 if (!kvm_vcpu_cache) {
69948 r = -ENOMEM;
69949 goto out_free_3;
69950 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
69951 if (r)
69952 goto out_free;
69953
69954 - kvm_chardev_ops.owner = module;
69955 - kvm_vm_fops.owner = module;
69956 - kvm_vcpu_fops.owner = module;
69957 + pax_open_kernel();
69958 + *(void **)&kvm_chardev_ops.owner = module;
69959 + *(void **)&kvm_vm_fops.owner = module;
69960 + *(void **)&kvm_vcpu_fops.owner = module;
69961 + pax_close_kernel();
69962
69963 r = misc_register(&kvm_dev);
69964 if (r) {